Posted to commits@ambari.apache.org by jl...@apache.org on 2017/10/09 06:00:59 UTC

[01/50] [abbrv] ambari git commit: AMBARI-22114. Ambari 3.0 : Outstanding UI issues - Part 1 (Ishan via Jaimin)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-14714 ba1ec6ddb -> c36afcdd0


AMBARI-22114. Ambari 3.0 : Outstanding UI issues - Part 1 (Ishan via Jaimin)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/969ecfc9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/969ecfc9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/969ecfc9

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 969ecfc9200efec6e4e6e45f397e0af0786c3003
Parents: 1116315
Author: Jaimin Jetly <ja...@hortonworks.com>
Authored: Mon Oct 2 22:27:02 2017 -0700
Committer: Jaimin Jetly <ja...@hortonworks.com>
Committed: Mon Oct 2 22:27:02 2017 -0700

----------------------------------------------------------------------
 .../app/controllers/main/service/info/summary.js   |  1 +
 ambari-web/app/styles/alerts.less                  |  6 +++---
 ambari-web/app/styles/application.less             |  5 +++++
 ambari-web/app/styles/bootstrap_overrides.less     |  4 ++++
 ambari-web/app/styles/modal_popups.less            |  2 +-
 ambari-web/app/styles/theme/bootstrap-ambari.css   | 17 +++++++++--------
 ambari-web/app/styles/top-nav.less                 |  5 +++--
 ambari-web/app/styles/wizard.less                  |  9 +++++++++
 ambari-web/app/templates/common/breadcrumbs.hbs    |  4 +++-
 .../main/service/info/service_alert_popup.hbs      |  6 +++---
 ambari-web/app/templates/wizard/step6.hbs          |  2 +-
 ambari-web/app/templates/wizard/step9.hbs          | 12 ++++++------
 12 files changed, 48 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/controllers/main/service/info/summary.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/service/info/summary.js b/ambari-web/app/controllers/main/service/info/summary.js
index d7fff18..d696334 100644
--- a/ambari-web/app/controllers/main/service/info/summary.js
+++ b/ambari-web/app/controllers/main/service/info/summary.js
@@ -420,6 +420,7 @@ App.MainServiceInfoSummaryController = Em.Controller.extend(App.WidgetSectionMix
       }),
       isHideBodyScroll: false,
       primary: Em.I18n.t('common.close'),
+      primaryClass: 'btn-default',
       secondary: null
     });
   },

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/styles/alerts.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/alerts.less b/ambari-web/app/styles/alerts.less
index 7db45f1..775c0ef 100644
--- a/ambari-web/app/styles/alerts.less
+++ b/ambari-web/app/styles/alerts.less
@@ -426,7 +426,7 @@
 
 #summary-alerts-popup {
   .alert-list-wrap {
-    padding: 10px 5px;
+    padding: 10px 5px 5px;
     .status-icon {
       padding-right: 5px;
       min-width: 20px;
@@ -447,9 +447,9 @@
         .modal-body {
           padding-left: 0;
           padding-right: 0;
-          height: 70%;
+          height: 75%;
           font-size: 14px;
-          .definition-latest-text {
+          .definition-latest-text, .timeago {
             font-size: 12px;
           }
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 746673a..8181996 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -2797,4 +2797,9 @@ a.abort-icon:hover {
   color: #b94a48;
   background-color: #f2dede;
   border: 1px solid #b94a48;
+}
+
+.breadcrumbs-forward-slash {
+  display: inline;
+  color: #D2D3D5;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/styles/bootstrap_overrides.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/bootstrap_overrides.less b/ambari-web/app/styles/bootstrap_overrides.less
index 8ba49d1..e1a5ab5 100644
--- a/ambari-web/app/styles/bootstrap_overrides.less
+++ b/ambari-web/app/styles/bootstrap_overrides.less
@@ -437,4 +437,8 @@ select.form-control {
 
 .wizard .wizard-body {
   padding: 0 !important;
+}
+
+.table.table-hover .action {
+  padding-right: 10px;
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/styles/modal_popups.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/modal_popups.less b/ambari-web/app/styles/modal_popups.less
index 97083d2..abe1c63 100644
--- a/ambari-web/app/styles/modal_popups.less
+++ b/ambari-web/app/styles/modal_popups.less
@@ -389,7 +389,7 @@
 }
 
 .modal-xlg {
-  width: 1280px;
+  width: 100%;
   .wizard {
     .container {
       width: 1236px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/styles/theme/bootstrap-ambari.css
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/theme/bootstrap-ambari.css b/ambari-web/app/styles/theme/bootstrap-ambari.css
index f868744..a223949 100644
--- a/ambari-web/app/styles/theme/bootstrap-ambari.css
+++ b/ambari-web/app/styles/theme/bootstrap-ambari.css
@@ -465,7 +465,7 @@ h2.table-title {
   border-width: 0;
   border-radius: 0;
   border-bottom: 2px solid transparent;
-  color: #666;
+  color: #6B6C6C;
   text-transform: uppercase;
 }
 .nav.nav-tabs li a:hover,
@@ -482,7 +482,8 @@ h2.table-title {
 }
 .nav.nav-tabs li.active a {
   color: #333;
-  border-bottom: 2px solid #3FAE2A;
+  border-bottom: 3px solid #3FAE2A;
+  padding-bottom: 1px;
 }
 .nav-tabs-left li,
 .nav-tabs-right li {
@@ -817,7 +818,7 @@ input.radio:checked + label:after {
   font-size: 13px;
   display: inline-block;
   vertical-align: middle;
-  color: #b8bec4;
+  color: #43AD49;
 }
 .navigation-bar-container ul.nav.side-nav-header li.navigation-header .btn-group.open .dropdown-toggle {
   -webkit-box-shadow: none;
@@ -889,7 +890,7 @@ input.radio:checked + label:after {
 .navigation-bar-container ul.nav.side-nav-menu li.mainmenu-li > a .navigation-icon,
 .navigation-bar-container ul.nav.side-nav-footer li.mainmenu-li > a .navigation-icon {
   line-height: 18px;
-  font-size: 18px;
+  font-size: 14px;
   color: #b8bec4;
 }
 .navigation-bar-container ul.nav.side-nav-menu li.navigation-footer > a .toggle-icon,
@@ -916,11 +917,11 @@ input.radio:checked + label:after {
 .navigation-bar-container ul.nav.side-nav-menu li.navigation-footer,
 .navigation-bar-container ul.nav.side-nav-footer li.navigation-footer {
   background: #313d54;
-  height: 50px;
+  height: 48px;
 }
 .navigation-bar-container ul.nav.side-nav-menu li.navigation-footer a .navigation-icon,
 .navigation-bar-container ul.nav.side-nav-footer li.navigation-footer a .navigation-icon {
-  color: #31823a;
+  color: #3fae2a;
   font-size: 20px;
   position: relative;
   padding: 0 15px;
@@ -1189,8 +1190,7 @@ input.radio:checked + label:after {
   top: 1px;
 }
 #notifications-dropdown.dropdown-menu {
-  min-width: 300px;
-  max-width: 300px;
+  width: 400px;
   min-height: 150px;
   padding: 0px;
   z-index: 1000;
@@ -1379,6 +1379,7 @@ input.radio:checked + label:after {
   border-radius: 0px;
   border: none;
   margin-top: 0px;
+  padding: 0 10px;
 }
 .accordion .panel-group .panel .panel-heading,
 .wizard .wizard-body .wizard-content .accordion .panel-group .panel .panel-heading {

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/styles/top-nav.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/top-nav.less b/ambari-web/app/styles/top-nav.less
index 2fd7db0..0644e52 100644
--- a/ambari-web/app/styles/top-nav.less
+++ b/ambari-web/app/styles/top-nav.less
@@ -23,10 +23,11 @@
     background: #fff;
     border-top: 1px solid #e3e3e3;
     border-bottom: 1px solid #e3e3e3;
+    margin-bottom: 10px;
 
     .navbar-header {
-      padding: 15px 15px 15px 0px;
-      margin-top: 5px;
+      padding: 19px 15px 19px 0px;
+      margin-top: -5px;
       font-size: 20px;
       a {
         color: #313D54;

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/styles/wizard.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/wizard.less b/ambari-web/app/styles/wizard.less
index 130896dd..5f60378 100644
--- a/ambari-web/app/styles/wizard.less
+++ b/ambari-web/app/styles/wizard.less
@@ -44,6 +44,11 @@
     .panel.panel-default.panel-internal {
       border: 1px solid #ddd;
     }
+    .panel.panel-default.panel-internal.install-retry-panel {
+      border-top: none;
+      border-left: none;
+      border-right: none;
+    }
     padding: 25px;
     background-color: #fff;
   }
@@ -218,6 +223,10 @@
         .checkbox {
           margin: 0;
         }
+        .host-component-checkbox {
+          font-size: 12px;
+          top: 0px;
+        }
       }
     }
     .spinner-overlay {

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/templates/common/breadcrumbs.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/breadcrumbs.hbs b/ambari-web/app/templates/common/breadcrumbs.hbs
index 363b306..a467223 100644
--- a/ambari-web/app/templates/common/breadcrumbs.hbs
+++ b/ambari-web/app/templates/common/breadcrumbs.hbs
@@ -24,5 +24,7 @@
     {{{item.formattedLabel}}}
   </a>
   {{/if}}
-  {{#unless item.isLast}}&nbsp;/&nbsp;{{/unless}}
+  {{#unless item.isLast}}
+    <p class="breadcrumbs-forward-slash">/</p>
+  {{/unless}}
 {{/each}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/templates/main/service/info/service_alert_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/service_alert_popup.hbs b/ambari-web/app/templates/main/service/info/service_alert_popup.hbs
index 9600aba..1022eef 100644
--- a/ambari-web/app/templates/main/service/info/service_alert_popup.hbs
+++ b/ambari-web/app/templates/main/service/info/service_alert_popup.hbs
@@ -21,7 +21,7 @@
     {{#each alert in view.alerts}}
       <li class="alert-list-wrap">
         <div class="row">
-          <div class="col-md-9 name-text">
+          <div class="col-md-8 name-text">
             <div>
               <a class="accordion-toggle" href="#" {{action "gotoAlertDetails" alert target="view"}}>{{alert.label}}</a>
             </div>
@@ -29,8 +29,8 @@
               {{alert.latestTextSummary}}
             </div>
           </div>
-          <div class="col-md-3 status-col" rel="alert-status-tooltip" >
-            {{view App.AlertDefinitionSummary contentBinding="alert"}}
+          <div class="col-md-4 status-col" rel="alert-status-tooltip" >
+            <div class="display-inline-block">{{view App.AlertDefinitionSummary contentBinding="alert"}}</div>
             <span class="timeago" {{bindAttr data-original-title="alert.lastTriggeredVerboseDisplay"}}>
               <span class="status-icon">{{{alert.status}}}</span>
               <time>{{alert.lastTriggeredForFormatted}}</time>

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/templates/wizard/step6.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step6.hbs b/ambari-web/app/templates/wizard/step6.hbs
index e7759da..6128b88 100644
--- a/ambari-web/app/templates/wizard/step6.hbs
+++ b/ambari-web/app/templates/wizard/step6.hbs
@@ -61,7 +61,7 @@
                     <div class="checkbox" {{bindAttr data-qa="checkbox.dataQaAttr"}}>
                         <input {{bindAttr id="checkbox.uId" checked="checkbox.checked" disabled="checkbox.isDisabled"}} {{action "checkboxClick" checkbox target="view" }}
                                 type="checkbox"/>
-                      <label {{bindAttr for="checkbox.uId"}}>{{checkbox.title}}</label>
+                      <label class="host-component-checkbox" {{bindAttr for="checkbox.uId"}}>{{checkbox.title}}</label>
                     </div>
                   </td>
                 {{/each}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/969ecfc9/ambari-web/app/templates/wizard/step9.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/wizard/step9.hbs b/ambari-web/app/templates/wizard/step9.hbs
index 5d63798..ba13c2c 100644
--- a/ambari-web/app/templates/wizard/step9.hbs
+++ b/ambari-web/app/templates/wizard/step9.hbs
@@ -32,18 +32,18 @@
         <div class="col-md-2" {{QAAttr "progress-message"}}>{{view.progressMessage}}</div>
       </div>
 
-      <div class="panel panel-default panel-internal">
+      <div class="panel panel-default panel-internal install-retry-panel">
         <div class="panel-heading">
-          <div class="pull-left">
+          <div class="pull-right">
             {{#if controller.showRetry}}
-              <a class="btn btn-primary" href="#" {{action retry}} {{QAAttr "retry-button"}}>
-                <i class="glyphicon glyphicon-repeat glyphicon-white"></i>
+              <button class="btn btn-default" href="#" {{action retry}} {{QAAttr "retry-button"}}>
+                <i class="glyphicon glyphicon-repeat glyphicon-white"></i>&nbsp;
                 {{t common.retry}}
-              </a>
+              </button>
             {{/if}}
           </div>
           <!-- filter by host level -->
-          <div id="host-filter" class="pull-right">
+          <div id="host-filter" class="pull-left">
             <ul class="clearfix">
               <li class="first">{{t common.show}}:</li>
               {{#each category in view.categories}}


[31/50] [abbrv] ambari git commit: AMBARI-22136 Enable server_action tasks defined in EU/RU upgrade pack xml files to take parameters (dili)

Posted by jl...@apache.org.
AMBARI-22136 Enable server_action tasks defined in EU/RU upgrade pack xml files to take parameters (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d5dd1938
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d5dd1938
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d5dd1938

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: d5dd19389bd4b43c587f72dd8ab8230e34b98bfc
Parents: 0ed128f
Author: Di Li <di...@apache.org>
Authored: Thu Oct 5 13:53:46 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Thu Oct 5 13:53:46 2017 -0400

----------------------------------------------------------------------
 .../ambari/server/actionmanager/Stage.java      |  3 +-
 .../internal/UpgradeResourceProvider.java       |  2 +
 .../stack/upgrade/ServerSideActionTask.java     | 15 +++++++
 .../state/stack/upgrade/TaskParameter.java      | 41 ++++++++++++++++++++
 .../src/main/resources/upgrade-pack.xsd         |  9 +++++
 5 files changed, 69 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d5dd1938/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
index f466ce9..b88275a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
@@ -465,7 +465,8 @@ public class Stage {
    * @param retryAllowed
    *          indicates whether retry after failure is allowed
    */
-  public synchronized void addServerActionCommand(String actionName, @Nullable String userName,
+  public synchronized void addServerActionCommand(String actionName,
+      @Nullable String userName,
       Role role, RoleCommand command, String clusterName,
       ServiceComponentHostServerActionEvent event, @Nullable Map<String, String> commandParams,
       @Nullable String commandDetail, @Nullable Map<String, Map<String, String>> configTags,

http://git-wip-us.apache.org/repos/asf/ambari/blob/d5dd1938/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 85f3a1b..a1ec98a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -1378,6 +1378,8 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     stage.setStageId(stageId);
     entity.setStageId(Long.valueOf(stageId));
 
+    Map<String, String> taskParameters = task.getParameters();
+    commandParams.putAll(taskParameters);
     stage.addServerActionCommand(task.getImplementationClass(),
         getManagementController().getAuthName(), Role.AMBARI_SERVER_ACTION, RoleCommand.EXECUTE,
         cluster.getClusterName(),

http://git-wip-us.apache.org/repos/asf/ambari/blob/d5dd1938/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServerSideActionTask.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServerSideActionTask.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServerSideActionTask.java
index c593c04..844ef24 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServerSideActionTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ServerSideActionTask.java
@@ -18,7 +18,9 @@
 package org.apache.ambari.server.state.stack.upgrade;
 
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import javax.xml.bind.annotation.XmlAttribute;
 import javax.xml.bind.annotation.XmlElement;
@@ -31,6 +33,19 @@ public abstract class ServerSideActionTask extends Task {
   @XmlAttribute(name="class")
   protected String implClass;
 
+  @XmlElement(name = "parameter")
+  public List<TaskParameter> parameters;
+
+  public Map<String, String> getParameters(){
+    Map<String, String> result = new HashMap<String, String>();
+    if (parameters != null) {
+      for (TaskParameter parameter : parameters) {
+        result.put(parameter.name, parameter.value);
+      }
+    }
+    return result;
+  }
+
   public static final String actionVerb = "Executing";
 
   public String getImplementationClass() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/d5dd1938/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskParameter.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskParameter.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskParameter.java
new file mode 100644
index 0000000..7773a67
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/TaskParameter.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.state.stack.upgrade;
+
+import javax.xml.bind.annotation.XmlAttribute;
+import javax.xml.bind.annotation.XmlValue;
+
+/**
+ * Optional parameter defined in upgrade packs to be used by server actions.
+ * These parameters are passed to corresponding server action as part of the command parameters.
+ * */
+public class TaskParameter {
+
+  /**
+   * Name of the parameter
+   * */
+  @XmlAttribute (name = "name")
+  public String name;
+
+  /**
+   * Parameter value
+   * */
+  @XmlValue
+  public String value;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/d5dd1938/ambari-server/src/main/resources/upgrade-pack.xsd
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade-pack.xsd b/ambari-server/src/main/resources/upgrade-pack.xsd
index 21606bd..79c50a7 100644
--- a/ambari-server/src/main/resources/upgrade-pack.xsd
+++ b/ambari-server/src/main/resources/upgrade-pack.xsd
@@ -292,6 +292,15 @@
       <xs:extension base="abstract-task-type">
         <xs:sequence>
           <xs:element name="message" minOccurs="0" maxOccurs="unbounded" />
+          <xs:element name="parameter" minOccurs="0" maxOccurs="unbounded">
+            <xs:complexType>
+              <xs:simpleContent>
+                <xs:extension base="xs:string">
+                  <xs:attribute name="name" />
+                </xs:extension>
+              </xs:simpleContent>
+            </xs:complexType>
+          </xs:element>
         </xs:sequence>
       </xs:extension>
     </xs:complexContent>
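
For illustration, a server_action task in an upgrade pack could then be parameterized like this (a minimal sketch: the action class and parameter name below are hypothetical, not taken from this commit; the task's "class" attribute and the <parameter> element shape follow the ServerSideActionTask and xsd changes above):

  <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.MyCustomAction">
    <parameter name="example.parameter.name">example-value</parameter>
  </task>

Per the UpgradeResourceProvider change above, task.getParameters() folds each <parameter> into the command parameters, so the server action receives them alongside the rest of commandParams.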


[16/50] [abbrv] ambari git commit: AMBARI-22130. Agent UT fail on trunk (aonishuk)

Posted by jl...@apache.org.
AMBARI-22130. Agent UT fail on trunk (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/de981ca0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/de981ca0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/de981ca0

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: de981ca006eba891868d97983c03a3cb019e23ef
Parents: 15cd1c5
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Wed Oct 4 15:57:54 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Wed Oct 4 15:57:54 2017 +0300

----------------------------------------------------------------------
 ambari-agent/src/test/python/ambari_agent/TestLiveStatus.py | 5 +----
 ambari-agent/src/test/python/ambari_agent/TestMain.py       | 5 ++---
 ambari-agent/src/test/python/ambari_agent/TestSecurity.py   | 1 -
 3 files changed, 3 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/de981ca0/ambari-agent/src/test/python/ambari_agent/TestLiveStatus.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestLiveStatus.py b/ambari-agent/src/test/python/ambari_agent/TestLiveStatus.py
index 930c845..abd7def 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestLiveStatus.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestLiveStatus.py
@@ -25,7 +25,6 @@ import os, sys, StringIO
 from ambari_agent import ActualConfigHandler
 from mock.mock import patch, MagicMock
 import pprint
-from ambari_agent import StatusCheck
 from ambari_commons import OSCheck
 from only_for_platform import os_distro_value
 
@@ -43,8 +42,7 @@ class TestLiveStatus(TestCase):
 
   @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
   @patch.object(ActualConfigHandler.ActualConfigHandler, "read_actual_component")
-  @patch.object(StatusCheck.StatusCheck, "getStatus")
-  def test_build_predefined(self, getStatus_mock, read_actual_component_mock):
+  def test_build_predefined(self, read_actual_component_mock):
     read_actual_component_mock.return_value = "actual_component"
     """
     Tests that if live status is defined (using default parameter),
@@ -62,6 +60,5 @@ class TestLiveStatus(TestCase):
                      "'configurationTags': 'actual_component',\n "
                      "'msg': '',\n 'serviceName': 'SOME_UNKNOWN_SERVICE',\n "
                      "'stackVersion': '',\n 'status': 'STARTED'}")
-    self.assertFalse(getStatus_mock.called)
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/de981ca0/ambari-agent/src/test/python/ambari_agent/TestMain.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestMain.py b/ambari-agent/src/test/python/ambari_agent/TestMain.py
index 504ca08..35f5e6f 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestMain.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestMain.py
@@ -36,7 +36,7 @@ from mock.mock import MagicMock, patch, ANY, Mock, call
 
 with patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value)):
   from ambari_agent import NetUtil, security
-  from ambari_agent import ProcessHelper, main
+  from ambari_agent import main
   from ambari_agent.AmbariConfig import AmbariConfig
   from ambari_agent.PingPortListener import PingPortListener
   from ambari_agent.Controller import Controller
@@ -62,8 +62,7 @@ class TestMain(unittest.TestCase):
   @patch("ambari_agent.HeartbeatHandlers.HeartbeatStopHandlersLinux")
   @patch("sys.exit")
   @patch("os.getpid")
-  @patch.object(ProcessHelper, "stopAgent")
-  def test_signal_handler(self, stopAgent_mock, os_getpid_mock, sys_exit_mock, heartbeat_handler_mock):
+  def test_signal_handler(self,os_getpid_mock, sys_exit_mock, heartbeat_handler_mock):
     # testing exit of children
     main.agentPid = 4444
     os_getpid_mock.return_value = 5555

http://git-wip-us.apache.org/repos/asf/ambari/blob/de981ca0/ambari-agent/src/test/python/ambari_agent/TestSecurity.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestSecurity.py b/ambari-agent/src/test/python/ambari_agent/TestSecurity.py
index c9a7fbe..ac295b5 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestSecurity.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestSecurity.py
@@ -35,7 +35,6 @@ from only_for_platform import os_distro_value
 with patch("platform.linux_distribution", return_value = ('Suse','11','Final')):
   from ambari_agent import NetUtil
   from ambari_agent.security import CertificateManager
-  from ambari_agent import ProcessHelper, main
   from ambari_agent.AmbariConfig import AmbariConfig
   from ambari_agent.Controller import Controller
   from ambari_agent import security


[29/50] [abbrv] ambari git commit: AMBARI-22144. Hitting pause during upgrade jumps progress to 100% (ncole)

Posted by jl...@apache.org.
AMBARI-22144. Hitting pause during upgrade jumps progress to 100% (ncole)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5370297d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5370297d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5370297d

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 5370297d982330cd43673afbc9a0cb341ea027f4
Parents: ed378b7
Author: Nate Cole <nc...@hortonworks.com>
Authored: Thu Oct 5 12:03:00 2017 -0400
Committer: Nate Cole <nc...@hortonworks.com>
Committed: Thu Oct 5 12:03:00 2017 -0400

----------------------------------------------------------------------
 .../internal/UpgradeResourceProvider.java       | 72 +++++++++++++++++++-
 .../internal/UpgradeResourceProviderTest.java   | 33 ++++++++-
 2 files changed, 102 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5370297d/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 33ce25e..85f3a1b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -198,7 +198,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
   private static final String REQUEST_END_TIME_ID = "Upgrade/end_time";
   private static final String REQUEST_EXCLUSIVE_ID = "Upgrade/exclusive";
 
-  private static final String REQUEST_PROGRESS_PERCENT_ID = "Upgrade/progress_percent";
+  protected static final String REQUEST_PROGRESS_PERCENT_ID = "Upgrade/progress_percent";
   private static final String REQUEST_STATUS_PROPERTY_ID = "Upgrade/request_status";
 
   private static final Set<String> PK_PROPERTY_IDS = new HashSet<>(
@@ -416,14 +416,82 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
 
         CalculatedStatus calc = CalculatedStatus.statusFromStageSummary(summary, summary.keySet());
 
+        if (calc.getStatus() == HostRoleStatus.ABORTED && entity.isSuspended()) {
+          double percent = calculateAbortedProgress(summary);
+          setResourceProperty(r, REQUEST_PROGRESS_PERCENT_ID, percent*100, requestPropertyIds);
+        } else {
+          setResourceProperty(r, REQUEST_PROGRESS_PERCENT_ID, calc.getPercent(), requestPropertyIds);
+        }
+
         setResourceProperty(r, REQUEST_STATUS_PROPERTY_ID, calc.getStatus(), requestPropertyIds);
-        setResourceProperty(r, REQUEST_PROGRESS_PERCENT_ID, calc.getPercent(), requestPropertyIds);
       }
     }
 
     return results;
   }
 
+  /**
+   * Unlike in CalculatedStatus, we can't use ABORTED here as a COMPLETED state.
+   * Therefore, the values will be slightly off since in CalculatedStatus, ABORTED
+   * contributes all of its progress to the overall progress, but here it
+   * contributes none of it.
+   *
+   * Since this is specifically for ABORTED upgrades that are
+   * also suspended, the percentages should come out pretty close after ABORTED tasks move back
+   * to PENDING.
+   *
+   * @return the percent complete, counting ABORTED as zero percent.
+   */
+  private double calculateAbortedProgress(Map<Long, HostRoleCommandStatusSummaryDTO> summary) {
+    // !!! use the raw states to determine percent completes
+    Map<HostRoleStatus, Integer> countTotals = new HashMap<>();
+    int totalTasks = 0;
+
+
+    for (HostRoleCommandStatusSummaryDTO statusSummary : summary.values()) {
+      totalTasks += statusSummary.getTaskTotal();
+      for (Map.Entry<HostRoleStatus, Integer> entry : statusSummary.getCounts().entrySet()) {
+        if (!countTotals.containsKey(entry.getKey())) {
+          countTotals.put(entry.getKey(), Integer.valueOf(0));
+        }
+        countTotals.put(entry.getKey(), countTotals.get(entry.getKey()) + entry.getValue());
+      }
+    }
+
+    double percent = 0d;
+
+    for (HostRoleStatus status : HostRoleStatus.values()) {
+      if (!countTotals.containsKey(status)) {
+        countTotals.put(status, Integer.valueOf(0));
+      }
+      double countValue = (double) countTotals.get(status);
+
+      // !!! calculation lifted from CalculatedStatus
+      switch (status) {
+        case ABORTED:
+          // !!! see javadoc
+          break;
+        case HOLDING:
+        case HOLDING_FAILED:
+        case HOLDING_TIMEDOUT:
+        case IN_PROGRESS:
+        case PENDING:  // shouldn't be any, we're supposed to be ABORTED
+          percent += countValue * 0.35d;
+          break;
+        case QUEUED:
+          percent += countValue * 0.09d;
+          break;
+        default:
+          if (status.isCompletedState()) {
+            percent += countValue / (double) totalTasks;
+          }
+          break;
+      }
+    }
+
+    return percent;
+  }
+
   @Override
   public RequestStatus updateResources(final Request request, Predicate predicate)
       throws SystemException, UnsupportedPropertyException, NoSuchResourceException,

http://git-wip-us.apache.org/repos/asf/ambari/blob/5370297d/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index fea56d9..20adac2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -871,9 +871,40 @@ public class UpgradeResourceProviderTest extends EasyMockSupport {
 
     UpgradeResourceProvider urp = createProvider(amc);
 
-    // !!! make sure we can.  actual abort is tested elsewhere
     Request req = PropertyHelper.getUpdateRequest(requestProps, null);
     urp.updateResources(req, null);
+
+    List<HostRoleCommandEntity> commands = hrcDAO.findByRequest(id);
+
+    int i = 0;
+    for (HostRoleCommandEntity command : commands) {
+      if (i < 3) {
+        command.setStatus(HostRoleStatus.COMPLETED);
+      } else {
+        command.setStatus(HostRoleStatus.ABORTED);
+      }
+      hrcDAO.merge(command);
+      i++;
+    }
+
+    req = PropertyHelper.getReadRequest(
+        UpgradeResourceProvider.UPGRADE_CLUSTER_NAME,
+        UpgradeResourceProvider.UPGRADE_ID,
+        UpgradeResourceProvider.REQUEST_PROGRESS_PERCENT_ID);
+
+    Predicate pred = new PredicateBuilder()
+        .property(UpgradeResourceProvider.UPGRADE_REQUEST_ID).equals(id.toString())
+        .and()
+        .property(UpgradeResourceProvider.UPGRADE_CLUSTER_NAME).equals("c1")
+        .toPredicate();
+
+    Set<Resource> resources = urp.getResources(req, pred);
+    assertEquals(1, resources.size());
+    res = resources.iterator().next();
+
+    Double value = (Double) res.getPropertyValue(UpgradeResourceProvider.REQUEST_PROGRESS_PERCENT_ID);
+
+    assertEquals(37.5d, value, 0.1d);
   }
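
A note on the asserted value: the test marks the first three host role commands COMPLETED and the rest ABORTED, and because the upgrade is suspended the new calculation counts ABORTED tasks as zero progress. Assuming the request contains 8 tasks in total (inferred from the assertion rather than stated in the diff), that works out to 3/8 = 0.375, i.e. the expected 37.5 percent.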
 
 


[47/50] [abbrv] ambari git commit: AMBARI-21776. Move druid version to druid 0.10.1 and drop TP flag. (Slim Bouguerra via Swapan Shridhar).

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
deleted file mode 100644
index ec98c3c..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
+++ /dev/null
@@ -1,307 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import json
-import os
-from resource_management import Fail
-from resource_management.libraries.resources.properties_file import PropertiesFile
-from resource_management.core.resources.system import Directory, Execute, File
-from resource_management.core.source import DownloadSource
-from resource_management.core.source import InlineTemplate
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.core.logger import Logger
-
-
-def druid(upgrade_type=None, nodeType=None):
-  import params
-  ensure_base_directories()
-
-  # Environment Variables
-  File(format("{params.druid_conf_dir}/druid-env.sh"),
-       owner=params.druid_user,
-       content=InlineTemplate(params.druid_env_sh_template),
-       mode = 0700
-       )
-
-  # common config
-  druid_common_config = mutable_config_dict(params.config['configurations']['druid-common'])
-  # User cannot override below configs
-  druid_common_config['druid.host'] = params.hostname
-  druid_common_config['druid.extensions.directory'] = params.druid_extensions_dir
-  druid_common_config['druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
-  druid_common_config['druid.selectors.indexing.serviceName'] = params.config['configurations']['druid-overlord'][
-    'druid.service']
-  druid_common_config['druid.selectors.coordinator.serviceName'] = \
-    params.config['configurations']['druid-coordinator']['druid.service']
-  druid_common_config['druid.extensions.loadList'] = json.dumps(eval(params.druid_extensions_load_list) +
-                                                     eval(params.druid_security_extensions_load_list))
-
-  # delete the password and user if empty otherwiswe derby will fail.
-  if 'derby' == druid_common_config['druid.metadata.storage.type']:
-    del druid_common_config['druid.metadata.storage.connector.user']
-    del druid_common_config['druid.metadata.storage.connector.password']
-
-  druid_env_config = mutable_config_dict(params.config['configurations']['druid-env'])
-
-  PropertiesFile("common.runtime.properties",
-                 dir=params.druid_common_conf_dir,
-                 properties=druid_common_config,
-                 owner=params.druid_user,
-                 group=params.user_group,
-                 mode = 0600
-                 )
-  Logger.info("Created common.runtime.properties")
-
-  File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
-       mode=0644,
-       owner=params.druid_user,
-       group=params.user_group,
-       content=InlineTemplate(params.log4j_props)
-       )
-  Logger.info("Created log4j file")
-
-  File("/etc/logrotate.d/druid",
-       mode=0644,
-       owner='root',
-       group='root',
-       content=InlineTemplate(params.logrotate_props)
-       )
-
-  Logger.info("Created log rotate file")
-
-  # node specific configs
-  for node_type in ['coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router']:
-    node_config_dir = format('{params.druid_conf_dir}/{node_type}')
-    node_type_lowercase = node_type.lower()
-
-    # Write runtime.properties file
-    node_config = mutable_config_dict(params.config['configurations'][format('druid-{node_type_lowercase}')])
-    PropertiesFile("runtime.properties",
-                   dir=node_config_dir,
-                   properties=node_config,
-                   owner=params.druid_user,
-                   group=params.user_group,
-                   mode = 0600
-                   )
-    Logger.info(format("Created druid-{node_type_lowercase} runtime.properties"))
-
-    # Write jvm configs
-    File(format('{node_config_dir}/jvm.config'),
-         owner=params.druid_user,
-         group=params.user_group,
-         content=InlineTemplate(
-           "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
-           node_heap_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.heap.memory')],
-           log4j_config_file=format("{params.druid_common_conf_dir}/druid-log4j.xml"),
-           node_direct_memory=druid_env_config[
-             format('druid.{node_type_lowercase}.jvm.direct.memory')],
-           node_jvm_opts=druid_env_config[format('druid.{node_type_lowercase}.jvm.opts')])
-         )
-    Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))
-    # Handling hadoop Lzo jars if enable and node type is hadoop related eg Overlords and MMs
-    if ['middleManager', 'overlord'].__contains__(node_type_lowercase) and params.lzo_enabled and len(
-            params.lzo_packages) > 0:
-        try:
-            Logger.info(
-                format(
-                    "Copying hadoop lzo jars from {hadoop_lib_home} to {druid_hadoop_dependencies_dir}/hadoop-client/*/"))
-            Execute(
-                format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {druid_hadoop_dependencies_dir}/hadoop-client/*/'))
-        except Fail as ex:
-            Logger.info(format("No Hadoop LZO found at {hadoop_lib_home}/hadoop-lzo*.jar"))
-
-  # All druid nodes have dependency on hdfs_client
-  ensure_hadoop_directories()
-  download_database_connector_if_needed()
-  # Pull all required dependencies
-  pulldeps()
-
-
-def mutable_config_dict(config):
-  rv = {}
-  for key, value in config.iteritems():
-    rv[key] = value
-  return rv
-
-
-def ensure_hadoop_directories():
-  import params
-  if 'hdfs-site' not in params.config['configurations']:
-    # HDFS Not Installed nothing to do.
-    Logger.info("Skipping HDFS directory creation as HDFS not installed")
-    return
-
-  druid_common_config = params.config['configurations']['druid-common']
-  # final overlord config contains both common and overlord config
-  druid_middlemanager_config = params.config['configurations']['druid-middlemanager']
-
-  # If user is using HDFS as deep storage create HDFS Directory for storing segments
-  deep_storage = druid_common_config["druid.storage.type"]
-  storage_dir = druid_common_config["druid.storage.storageDirectory"]
-
-  if deep_storage == 'hdfs':
-    # create the home dir for druid
-    params.HdfsResource(format("/user/{params.druid_user}"),
-                        type="directory",
-                        action="create_on_execute",
-                        owner=params.druid_user,
-                        group='hadoop',
-                        recursive_chown=True,
-                        recursive_chmod=True
-                        )
-
-    # create the segment storage dir, users like hive from group hadoop need to write to this directory
-    create_hadoop_directory(storage_dir, mode=0775)
-
-  # Create HadoopIndexTask hadoopWorkingPath
-  hadoop_working_path = druid_middlemanager_config['druid.indexer.task.hadoopWorkingPath']
-  if hadoop_working_path is not None:
-    if hadoop_working_path.startswith(params.hdfs_tmp_dir):
-        params.HdfsResource(params.hdfs_tmp_dir,
-                            type="directory",
-                            action="create_on_execute",
-                            owner=params.hdfs_user,
-                            mode=0777,
-                            )
-    create_hadoop_directory(hadoop_working_path, mode=0775)
-
-  # If HDFS is used for storing logs, create Index Task log directory
-  indexer_logs_type = druid_common_config['druid.indexer.logs.type']
-  indexer_logs_directory = druid_common_config['druid.indexer.logs.directory']
-  if indexer_logs_type == 'hdfs' and indexer_logs_directory is not None:
-    create_hadoop_directory(indexer_logs_directory)
-
-
-def create_hadoop_directory(hadoop_dir, mode=0755):
-  import params
-  params.HdfsResource(hadoop_dir,
-                      type="directory",
-                      action="create_on_execute",
-                      owner=params.druid_user,
-                      group='hadoop',
-                      mode=mode
-                      )
-  Logger.info(format("Created Hadoop Directory [{hadoop_dir}], with mode [{mode}]"))
-
-
-def ensure_base_directories():
-  import params
-  Directory(
-    [params.druid_log_dir, params.druid_pid_dir],
-    mode=0755,
-    owner=params.druid_user,
-    group=params.user_group,
-    create_parents=True,
-    recursive_ownership=True,
-  )
-
-  Directory(
-    [params.druid_conf_dir, params.druid_common_conf_dir, params.druid_coordinator_conf_dir,
-     params.druid_broker_conf_dir, params.druid_middlemanager_conf_dir, params.druid_historical_conf_dir,
-     params.druid_overlord_conf_dir, params.druid_router_conf_dir, params.druid_segment_infoDir,
-     params.druid_tasks_dir],
-    mode=0700,
-    cd_access='a',
-    owner=params.druid_user,
-    group=params.user_group,
-    create_parents=True,
-    recursive_ownership=True,
-  )
-
-  segment_cache_locations = json.loads(params.druid_segment_cache_locations)
-  for segment_cache_location in segment_cache_locations:
-    Directory(
-      segment_cache_location["path"],
-      mode=0700,
-      owner=params.druid_user,
-      group=params.user_group,
-      create_parents=True,
-      recursive_ownership=True,
-      cd_access='a'
-    )
-
-
-
-def get_daemon_cmd(params=None, node_type=None, command=None):
-  return format('source {params.druid_conf_dir}/druid-env.sh ; {params.druid_home}/bin/node.sh {node_type} {command}')
-
-
-def getPid(params=None, nodeType=None):
-  return format('{params.druid_pid_dir}/{nodeType}.pid')
-
-
-def pulldeps():
-  import params
-  extensions_list = eval(params.druid_extensions)
-  extensions_string = '{0}'.format("-c ".join(extensions_list))
-  repository_list = eval(params.druid_repo_list)
-  repository_string = '{0}'.format("-r ".join(repository_list))
-  if len(extensions_list) > 0:
-    try:
-      # Make sure druid user has permissions to write dependencies
-      Directory(
-        [params.druid_extensions_dir, params.druid_hadoop_dependencies_dir],
-        mode=0755,
-        cd_access='a',
-        owner=params.druid_user,
-        group=params.user_group,
-        create_parents=True,
-        recursive_ownership=True,
-      )
-      pull_deps_command = format(
-        "source {params.druid_conf_dir}/druid-env.sh ; java -classpath '{params.druid_home}/lib/*' -Ddruid.extensions.loadList=[] "
-        "-Ddruid.extensions.directory={params.druid_extensions_dir} -Ddruid.extensions.hadoopDependenciesDir={params.druid_hadoop_dependencies_dir} "
-        "io.druid.cli.Main tools pull-deps -c {extensions_string} --no-default-hadoop")
-
-      if len(repository_list) > 0:
-        pull_deps_command = format("{pull_deps_command} -r {repository_string}")
-
-      Execute(pull_deps_command,
-              user=params.druid_user
-              )
-      Logger.info(format("Pull Dependencies Complete"))
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise
-
-
-def download_database_connector_if_needed():
-  """
-  Downloads the database connector to use when connecting to the metadata storage
-  """
-  import params
-  if params.metadata_storage_type != 'mysql' or not params.jdbc_driver_jar:
-    return
-
-  File(params.check_db_connection_jar,
-       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}"))
-       )
-
-  target_jar_with_directory = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
-
-  if not os.path.exists(target_jar_with_directory):
-    File(params.downloaded_custom_connector,
-         content=DownloadSource(params.connector_curl_source))
-
-    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
-            path=["/bin", "/usr/bin/"],
-            sudo=True)
-
-    File(target_jar_with_directory, owner=params.druid_user,
-         group=params.user_group)

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
deleted file mode 100644
index 8053dcb..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-from resource_management.core import sudo
-from resource_management import Script
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.show_logs import show_logs
-from druid import druid, get_daemon_cmd, getPid
-
-
-class DruidBase(Script):
-  def __init__(self, nodeType=None):
-    self.nodeType = nodeType
-
-  def install(self, env):
-    self.install_packages(env)
-
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    druid(upgrade_type=upgrade_type, nodeType=self.nodeType)
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    node_type_lower = self.nodeType.lower()
-    Logger.info(format("Executing druid-{node_type_lower} Upgrade pre-restart"))
-    import params
-
-    env.set_params(params)
-
-    if params.stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version):
-      stack_select.select_packages(params.stack_version)
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env, upgrade_type=upgrade_type)
-    daemon_cmd = get_daemon_cmd(params, self.nodeType, "start")
-    # Verify Database connection on Druid start
-    if params.metadata_storage_type == 'mysql':
-      if not params.jdbc_driver_jar or not os.path.isfile(params.connector_download_dir + os.path.sep + params.jdbc_driver_jar):
-        path_to_jdbc =  params.connector_download_dir + os.path.sep + "*"
-        error_message = "Error! Sorry, but we can't find jdbc driver for mysql.So, db connection check can fail." + \
-                        "Please run 'ambari-server setup --jdbc-db=mysql --jdbc-driver={path_to_jdbc} on server host.'"
-        Logger.error(error_message)
-      else:
-        path_to_jdbc = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
-      db_connection_check_command = format("{params.java8_home}/bin/java -cp {params.check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{params.metadata_storage_url}' {params.metadata_storage_user} {params.metadata_storage_password!p} com.mysql.jdbc.Driver")
-    else:
-      db_connection_check_command = None
-
-    if db_connection_check_command:
-      sudo.chmod(params.check_db_connection_jar, 0755)
-      Execute( db_connection_check_command,
-               tries=5,
-               try_sleep=10,
-               user=params.druid_user
-               )
-
-    try:
-      Execute(daemon_cmd,
-              user=params.druid_user
-              )
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    daemon_cmd = get_daemon_cmd(params, self.nodeType, "stop")
-    try:
-      Execute(daemon_cmd,
-              user=params.druid_user
-              )
-    except:
-      show_logs(params.druid_log_dir, params.druid_user)
-      raise
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = getPid(status_params, self.nodeType)
-    check_process_status(pid_file)
-
-  def get_log_folder(self):
-    import params
-    return params.druid_log_dir
-
-  def get_user(self):
-    import params
-    return params.druid_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
deleted file mode 100644
index 22390a6..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidHistorical(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="historical")
-
-
-if __name__ == "__main__":
-  DruidHistorical().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
deleted file mode 100644
index 20df89c..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidMiddleManager(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="middleManager")
-
-
-if __name__ == "__main__":
-  DruidMiddleManager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
deleted file mode 100644
index e4d7fcc..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidOverlord(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="overlord")
-
-
-if __name__ == "__main__":
-  DruidOverlord().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
deleted file mode 100644
index fd1cde6..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from ambari_commons import OSCheck
-from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.default import default
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-import status_params
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-SERVER_ROLE_DIRECTORY_MAP = {
-  'DRUID_BROKER': 'druid-broker',
-  'DRUID_COORDINATOR': 'druid-coordinator',
-  'DRUID_HISTORICAL': 'druid-historical',
-  'DRUID_MIDDLEMANAGER': 'druid-middlemanager',
-  'DRUID_OVERLORD': 'druid-overlord',
-  'DRUID_ROUTER': 'druid-router'
-}
-
-# server configurations
-config = Script.get_config()
-stack_root = Script.get_stack_root()
-tmp_dir = Script.get_tmp_dir()
-
-stack_name = default("/hostLevelParams/stack_name", None)
-
-# stack version
-stack_version = default("/commandParams/version", None)
-
-# un-formatted stack version
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-
-# default role to coordinator needed for service checks
-component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "DRUID_COORDINATOR")
-
-hostname = config['hostname']
-sudo = AMBARI_SUDO_BINARY
-
-# default druid parameters
-druid_home = format("{stack_root}/current/{component_directory}")
-druid_conf_dir = format("{stack_root}/current/{component_directory}/conf")
-
-druid_common_conf_dir = druid_conf_dir + "/_common"
-druid_coordinator_conf_dir = druid_conf_dir + "/coordinator"
-druid_overlord_conf_dir = druid_conf_dir + "/overlord"
-druid_broker_conf_dir = druid_conf_dir + "/broker"
-druid_historical_conf_dir = druid_conf_dir + "/historical"
-druid_middlemanager_conf_dir = druid_conf_dir + "/middleManager"
-druid_router_conf_dir = druid_conf_dir + "/router"
-druid_extensions_dir = druid_home + "/extensions"
-druid_hadoop_dependencies_dir = druid_home + "/hadoop-dependencies"
-druid_segment_infoDir = config['configurations']['druid-historical']['druid.segmentCache.infoDir']
-druid_segment_cache_locations = config['configurations']['druid-historical']['druid.segmentCache.locations']
-druid_tasks_dir = config['configurations']['druid-middlemanager']['druid.indexer.task.baseTaskDir']
-druid_user = config['configurations']['druid-env']['druid_user']
-druid_log_dir = config['configurations']['druid-env']['druid_log_dir']
-druid_classpath = config['configurations']['druid-env']['druid_classpath']
-druid_extensions = config['configurations']['druid-common']['druid.extensions.pullList']
-druid_repo_list = config['configurations']['druid-common']['druid.extensions.repositoryList']
-druid_extensions_load_list = config['configurations']['druid-common']['druid.extensions.loadList']
-druid_security_extensions_load_list = config['configurations']['druid-common']['druid.security.extensions.loadList']
-
-
-# status params
-druid_pid_dir = status_params.druid_pid_dir
-user_group = config['configurations']['cluster-env']['user_group']
-java8_home = config['hostLevelParams']['java_home']
-druid_env_sh_template = config['configurations']['druid-env']['content']
-
-# log4j params
-log4j_props = config['configurations']['druid-log4j']['content']
-druid_log_level = config['configurations']['druid-log4j']['druid_log_level']
-metamx_log_level = config['configurations']['druid-log4j']['metamx_log_level']
-root_log_level = config['configurations']['druid-log4j']['root_log_level']
-
-druid_log_maxbackupindex = default('/configurations/druid-logrotate/druid_log_maxbackupindex', 7)
-druid_log_maxfilesize = default('/configurations/druid-logrotate/druid_log_maxfilesize', 256)
-logrotate_props = config['configurations']['druid-logrotate']['content']
-
-# Metadata storage
-metadata_storage_user = config['configurations']['druid-common']['druid.metadata.storage.connector.user']
-metadata_storage_password = config['configurations']['druid-common']['druid.metadata.storage.connector.password']
-metadata_storage_db_name = config['configurations']['druid-common']['database_name']
-metadata_storage_type = config['configurations']['druid-common']['druid.metadata.storage.type']
-metadata_storage_url = config['configurations']['druid-common']['druid.metadata.storage.connector.connectURI']
-jdk_location = config['hostLevelParams']['jdk_location']
-if 'mysql' == metadata_storage_type:
-  jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
-  connector_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
-  connector_download_dir=format("{druid_extensions_dir}/mysql-metadata-storage")
-  downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-
-# HDFS
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST",
-                                                                                                             hostname)
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-dfs_type = default("/commandParams/dfs_type", "")
-hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
-
-# Kerberos
-druid_principal_name = default('/configurations/druid-common/druid.hadoop.security.kerberos.principal',
-                               'missing_principal')
-druid_user_keytab = default('/configurations/druid-common/druid.hadoop.security.kerberos.keytab', 'missing_keytab')
-
-import functools
-
-# create partial functions with common arguments for every HdfsResource call
-# to create hdfs directory we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled=security_enabled,
-  keytab=hdfs_user_keytab,
-  kinit_path_local=kinit_path_local,
-  hadoop_bin_dir=hadoop_bin_dir,
-  hadoop_conf_dir=hadoop_conf_dir,
-  principal_name=hdfs_principal_name,
-  hdfs_site=hdfs_site,
-  default_fs=default_fs,
-  immutable_paths=get_not_managed_resources(),
-  dfs_type=dfs_type
-)
-
-# Ambari Metrics
-metric_emitter_type = "noop"
-metric_collector_host = ""
-metric_collector_port = ""
-metric_collector_protocol = ""
-metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-
-ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
-has_metric_collector = not len(ams_collector_hosts) == 0
-
-if has_metric_collector:
-    metric_emitter_type = "ambari-metrics"
-    if 'cluster-env' in config['configurations'] and \
-                    'metrics_collector_vip_host' in config['configurations']['cluster-env']:
-        metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
-    else:
-        metric_collector_host = ams_collector_hosts[0]
-    if 'cluster-env' in config['configurations'] and \
-                    'metrics_collector_vip_port' in config['configurations']['cluster-env']:
-        metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
-    else:
-        metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
-        if metric_collector_web_address.find(':') != -1:
-            metric_collector_port = metric_collector_web_address.split(':')[1]
-        else:
-            metric_collector_port = '6188'
-    if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-        metric_collector_protocol = 'https'
-    else:
-        metric_collector_protocol = 'http'
-    pass
-
-# Create current Hadoop Clients  Libs
-stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
-lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
-lzo_packages = get_lzo_packages(stack_version_unformatted)
-hadoop_lib_home = stack_root + '/' + stack_version + '/hadoop/lib'
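
The functools.partial block above pre-binds the cluster-wide arguments (user, keytab, hadoop dirs, defaultFS, and so on) once, so later call sites only pass what varies per call. The same idea in isolation, with a toy resource function standing in for the real HdfsResource:

import functools

def resource(path, user, security_enabled, action="create"):
    print("%s %s as %s (kerberized=%s)" % (action, path, user, security_enabled))

# bind the invariants once, as params.py does for HdfsResource
hdfs_resource = functools.partial(resource, user="hdfs", security_enabled=True)

hdfs_resource("/user/druid")                      # create /user/druid as hdfs (kerberized=True)
hdfs_resource("/tmp/druid-indexing", action="delete")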

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
deleted file mode 100644
index 1731a2a..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidRouter(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="router")
-
-
-if __name__ == "__main__":
-  DruidRouter().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
deleted file mode 100644
index 139b727..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.format import format
-from resource_management.core.resources.system import Execute
-
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    self.checkComponent(params, "druid_coordinator", "druid-coordinator")
-    self.checkComponent(params, "druid_overlord", "druid-overlord")
-
-  def checkComponent(self, params, component_name, config_name):
-    component_port = params.config['configurations'][format('{config_name}')]['druid.port']
-    for component_host in params.config['clusterHostInfo'][format('{component_name}_hosts')]:
-      Execute(format(
-        "curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {component_host}:{component_port}/status | grep 200"),
-        tries=10,
-        try_sleep=3,
-        logoutput=True)
-
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
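
The service check above shells out to curl once per coordinator/overlord host and greps for a 200 response code, retrying up to 10 times with a 3 second sleep. A rough standard-library equivalent of one probe loop (the Kerberos negotiation curl performs with --negotiate has no stdlib analogue, so it is omitted here):

import time
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2

def wait_for_status(host, port, tries=10, try_sleep=3):
    """Return True once http://host:port/status answers 200, else False."""
    url = "http://%s:%s/status" % (host, port)
    for _ in range(tries):
        try:
            if urlopen(url, timeout=5).getcode() == 200:
                return True
        except Exception:
            pass
        time.sleep(try_sleep)
    return False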

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
deleted file mode 100644
index ee1d61c..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.script.script import Script
-
-config = Script.get_config()
-
-druid_pid_dir = config['configurations']['druid-env']['druid_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
deleted file mode 100644
index c68b9b9..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol": {
-      "type": "HTTP_ONLY"
-    },
-    "links": [
-      {
-        "name": "coordinator_console",
-        "label": "Druid Coordinator Console",
-        "component_name": "DRUID_COORDINATOR",
-        "requires_user_name": "false",
-        "url": "%@://%@:%@",
-        "port": {
-          "http_property": "druid.port",
-          "http_default_port": "8081",
-          "regex": "^(\\d+)$",
-          "site": "druid-coordinator"
-        }
-      },
-      {
-        "name": "overlord_console",
-        "label": "Druid Overlord Console",
-        "component_name": "DRUID_OVERLORD",
-        "requires_user_name": "false",
-        "url": "%@://%@:%@",
-        "port": {
-          "http_property": "druid.port",
-          "http_default_port": "8090",
-          "regex": "^(\\d+)$",
-          "site": "druid-overlord"
-        }
-      }
-    ]
-  }
-}
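
Each quick link is a URL template "%@://%@:%@" filled with protocol, host, and port, where the port is read from druid.port in the named site and validated against the regex. A simplified sketch of that substitution (the helper below is a guess at the semantics, not Ambari's actual quick-links resolution code):

import re

def build_quicklink(template, protocol, host, port, port_regex=r"^(\d+)$"):
    if not re.match(port_regex, str(port)):
        raise ValueError("port %r does not match %s" % (port, port_regex))
    for value in (protocol, host, str(port)):
        template = template.replace("%@", value, 1)
    return template

print(build_quicklink("%@://%@:%@", "http", "c6401.ambari.apache.org", 8081))
# -> http://c6401.ambari.apache.org:8081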

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/role_command_order.json b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/role_command_order.json
deleted file mode 100644
index 4d697fe..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/role_command_order.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-  "general_deps" : {
-    "_comment" : "dependencies for Druid",
-    "DRUID_HISTORICAL-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_OVERLORD-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_MIDDLEMANAGER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_BROKER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_ROUTER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_COORDINATOR-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "DRUID_OVERLORD-RESTART" : ["DRUID_HISTORICAL-RESTART"],
-    "DRUID_MIDDLEMANAGER-RESTART" : ["DRUID_OVERLORD-RESTART"],
-    "DRUID_BROKER-RESTART" : ["DRUID_MIDDLEMANAGER-RESTART"],
-    "DRUID_ROUTER-RESTART" : ["DRUID_BROKER-RESTART"],
-    "DRUID_COORDINATOR-RESTART" : ["DRUID_ROUTER-RESTART"],
-    "DRUID_SERVICE_CHECK-SERVICE_CHECK" : ["DRUID_HISTORICAL-START", "DRUID_COORDINATOR-START", "DRUID_OVERLORD-START", "DRUID_MIDDLEMANAGER-START", "DRUID_BROKER-START", "DRUID_ROUTER-START"]
-  }
-}
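
The START entries gate every Druid daemon on ZooKeeper, HDFS, and YARN being up, while the RESTART entries chain the Druid roles into a fixed sequence (historical -> overlord -> middlemanager -> broker -> router -> coordinator). Resolving such a map into an execution order is a plain topological sort; a minimal sketch over the RESTART chain (assumes the dependency graph is acyclic, as it is here):

def restart_order(deps):
    """Depth-first topological sort: blockers land before the commands they gate."""
    order, seen = [], set()

    def visit(cmd):
        if cmd in seen:
            return
        seen.add(cmd)
        for blocker in deps.get(cmd, []):
            visit(blocker)
        order.append(cmd)

    for cmd in deps:
        visit(cmd)
    return order

deps = {
    "DRUID_OVERLORD-RESTART": ["DRUID_HISTORICAL-RESTART"],
    "DRUID_MIDDLEMANAGER-RESTART": ["DRUID_OVERLORD-RESTART"],
    "DRUID_BROKER-RESTART": ["DRUID_MIDDLEMANAGER-RESTART"],
    "DRUID_ROUTER-RESTART": ["DRUID_BROKER-RESTART"],
    "DRUID_COORDINATOR-RESTART": ["DRUID_ROUTER-RESTART"],
}
print(restart_order(deps))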

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
deleted file mode 100644
index 7033e19..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
+++ /dev/null
@@ -1,120 +0,0 @@
-{
-  "name": "default",
-  "description": "Default theme for Druid service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "default",
-        "tabs": [
-          {
-            "name": "metadata_storage",
-            "display-name": "META DATA STORAGE CONFIG",
-            "layout": {
-              "tab-columns": "",
-              "tab-rows": "1",
-              "sections": [
-                {
-                  "name": "section-metadata-storage",
-                  "display-name": "",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "2",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-metadata-storage-row1-col1",
-                      "display-name": "DRUID META DATA STORAGE",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "druid-common/database_name",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.type",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.user",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.password",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/metastore_hostname",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.port",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        },
-        {
-          "config": "druid-common/druid.metadata.storage.connector.connectURI",
-          "subsection-name": "subsection-metadata-storage-row1-col1"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "druid-common/database_name",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.type",
-        "widget": {
-          "type": "combo"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.user",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.password",
-        "widget": {
-          "type": "password"
-        }
-      },
-      {
-        "config": "druid-common/metastore_hostname",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.port",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "druid-common/druid.metadata.storage.connector.connectURI",
-        "widget": {
-          "type": "text-field"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
index f1f099e..07dd6d6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
@@ -20,9 +20,8 @@
   <services>
     <service>
       <name>DRUID</name>
-      <version>0.9.2</version>
-      <extends>common-services/DRUID/0.9.2</extends>
-      <selection>TECH_PREVIEW</selection>
+      <version>0.10.1</version>
+      <extends>common-services/DRUID/0.10.1</extends>
     </service>
   </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py b/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
index d8b9fe2..80e9d54 100644
--- a/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
+++ b/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
@@ -31,7 +31,7 @@ from resource_management.core.logger import Logger
 @patch("resource_management.libraries.Script.get_tmp_dir", new=MagicMock(return_value=('/var/lib/ambari-agent/tmp')))
 @patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.0.0.0-1234"))
 class TestDruid(RMFTestCase):
-  COMMON_SERVICES_PACKAGE_DIR = "DRUID/0.9.2/package"
+  COMMON_SERVICES_PACKAGE_DIR = "DRUID/0.10.1/package"
   STACK_VERSION = "2.6"
   DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
 


[38/50] [abbrv] ambari git commit: AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)

Posted by jl...@apache.org.
AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7172655f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7172655f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7172655f

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 7172655ff269fbb7e0d29ba93197aa5a804749c0
Parents: 32bf39e
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 16:33:57 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 16:33:57 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     |  8 ++
 .../AmbariManagementControllerImpl.java         | 22 ++---
 .../internal/ServiceResourceProvider.java       |  8 --
 .../internal/StackArtifactResourceProvider.java | 18 +---
 .../server/orm/entities/WidgetLayoutEntity.java |  6 +-
 .../ambari/server/stack/StackDirectory.java     | 18 ----
 .../apache/ambari/server/stack/StackModule.java |  5 --
 .../apache/ambari/server/state/StackInfo.java   |  8 --
 .../resources/stacks/HDP/2.0.6/widgets.json     | 95 --------------------
 .../main/resources/stacks/HDP/3.0/widgets.json  | 95 --------------------
 .../server/api/services/AmbariMetaInfoTest.java |  8 ++
 .../AmbariManagementControllerImplTest.java     |  6 +-
 .../AmbariManagementControllerTest.java         |  6 ++
 .../resources/stacks/OTHER/1.0/widgets.json     | 95 --------------------
 14 files changed, 34 insertions(+), 364 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index de84965..425d247 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
+import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -125,6 +126,7 @@ public class AmbariMetaInfo {
   private File commonServicesRoot;
   private File extensionsRoot;
   private File serverVersionFile;
+  private File commonWidgetsDescriptorFile;
   private File customActionRoot;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
@@ -214,6 +216,8 @@ public class AmbariMetaInfo {
     serverVersionFile = new File(serverVersionFilePath);
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
+
+    commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
   /**
@@ -1435,4 +1439,8 @@ public class AmbariMetaInfo {
 
     return null;
   }
+
+  public File getCommonWidgetsDescriptorFile() {
+    return commonWidgetsDescriptorFile;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index b2993e3..5642575 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -5184,22 +5184,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         widgetDescriptorFiles.add(widgetDescriptorFile);
       }
     } else {
-      Set<StackId> stackIds = new HashSet<>();
-
-      for (Service svc : cluster.getServices().values()) {
-        stackIds.add(svc.getDesiredStackId());
-      }
-
-      for (StackId stackId : stackIds) {
-        StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
-
-        String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
-        if (widgetDescriptorFileLocation != null) {
-          File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
-          if (widgetDescriptorFile.exists()) {
-            widgetDescriptorFiles.add(widgetDescriptorFile);
-          }
-        }
+      // common cluster level widgets
+      File commonWidgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
+      if (commonWidgetsFile != null && commonWidgetsFile.exists()) {
+        widgetDescriptorFiles.add(commonWidgetsFile);
+      } else {
+        LOG.warn("Common widgets file with path {%s} doesn't exist. No cluster widgets will be created.", commonWidgetsFile);
       }
     }
 

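Net effect of this hunk: instead of collecting a widgets.json per desired stack, the controller consults the single common descriptor and logs a warning when it is absent. The control flow, reduced to a Python sketch (names here are illustrative, not Ambari's API):

import json
import os

def load_cluster_widget_layouts(common_widgets_path):
    """One common widgets.json now feeds cluster-level widgets; missing file -> none."""
    if not (common_widgets_path and os.path.exists(common_widgets_path)):
        return []  # mirrors the LOG.warn branch: no cluster widgets are created
    with open(common_widgets_path) as f:
        return json.load(f).get("layouts", [])
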
http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 76a4547..e65693b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -423,8 +423,6 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     // do all validation checks
     validateCreateRequests(requests, clusters);
 
-    Set<Cluster> clustersSetFromRequests = new HashSet<>();
-
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
@@ -480,12 +478,6 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
       // Initialize service widgets
       getManagementController().initializeWidgetsAndLayouts(cluster, s);
-      clustersSetFromRequests.add(cluster);
-    }
-
-    // Create cluster widgets and layouts
-    for (Cluster cluster : clustersSetFromRequests) {
-      getManagementController().initializeWidgetsAndLayouts(cluster, null);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index 2e8a32a..a7f7710 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -426,7 +426,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     }
 
     if (StringUtils.isEmpty(serviceName)) {
-      return getWidgetsDescriptorForCluster(stackInfo);
+      return null;
     } else {
       return getWidgetsDescriptorForService(stackInfo, serviceName);
     }
@@ -450,22 +450,6 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     return widgetDescriptor;
   }
 
-  public Map<String, Object> getWidgetsDescriptorForCluster(StackInfo stackInfo)
-      throws NoSuchParentResourceException, IOException {
-
-    Map<String, Object> widgetDescriptor = null;
-
-    String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
-    if (widgetDescriptorFileLocation != null) {
-      File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
-      if (widgetDescriptorFile.exists()) {
-        widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
-      }
-    }
-
-    return widgetDescriptor;
-  }
-
   /**
    * Get a kerberos descriptor.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
index 90d98fc..1fa45e9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
@@ -33,6 +33,7 @@ import javax.persistence.OneToMany;
 import javax.persistence.OrderBy;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
+import javax.persistence.UniqueConstraint;
 
 @Entity
 @Table(name = "widget_layout")
@@ -41,7 +42,8 @@ import javax.persistence.TableGenerator;
         pkColumnName = "sequence_name",
         valueColumnName = "sequence_value",
         pkColumnValue = "widget_layout_id_seq",
-        initialValue = 0
+        initialValue = 0,
+        uniqueConstraints=@UniqueConstraint(columnNames={"layout_name", "cluster_id"})
 )
 @NamedQueries({
     @NamedQuery(name = "WidgetLayoutEntity.findAll", query = "SELECT widgetLayout FROM WidgetLayoutEntity widgetLayout"),
@@ -56,7 +58,7 @@ public class WidgetLayoutEntity {
   @Column(name = "id", nullable = false, updatable = false)
   private Long id;
 
-  @Column(name = "layout_name", nullable = false, unique = true, length = 255)
+  @Column(name = "layout_name", nullable = false, length = 255)
   private String layoutName;
 
   @Column(name = "section_name", nullable = false, length = 255)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index 9259466..e3c586b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -94,11 +94,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String kerberosDescriptorPreconfigureFilePath;
 
   /**
-   * widgets descriptor file path
-   */
-  private String widgetsDescriptorFilePath;
-
-  /**
    * repository file
    */
   private RepositoryXml repoFile;
@@ -233,15 +228,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
-   * Obtain the path to the (stack-level) widgets descriptor file
-   *
-   * @return the path to the (stack-level) widgets descriptor file
-   */
-  public String getWidgetsDescriptorFilePath() {
-    return widgetsDescriptorFilePath;
-  }
-
-  /**
    * Obtain the repository directory path.
    *
    * @return repository directory path
@@ -324,10 +310,6 @@ public class StackDirectory extends StackDefinitionDirectory {
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;
     }
 
-    if (subDirs.contains(WIDGETS_DESCRIPTOR_FILE_NAME)) {
-      widgetsDescriptorFilePath = getAbsolutePath() + File.separator + WIDGETS_DESCRIPTOR_FILE_NAME;
-    }
-
     parseUpgradePacks(subDirs);
     parseServiceDirectories(subDirs);
     parseRepoFile(subDirs);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 742706d..71235f3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -294,10 +294,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
     }
 
-    if (stackInfo.getWidgetsDescriptorFileLocation() == null) {
-      stackInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
-    }
-
     mergeServicesWithParent(parentStack, allStacks, commonServices, extensions);
   }
 
@@ -573,7 +569,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
-      stackInfo.setWidgetsDescriptorFileLocation(stackDirectory.getWidgetsDescriptorFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());
       stackInfo.setConfigUpgradePack(stackDirectory.getConfigUpgradePack());

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index dcf850f..3efc997 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -429,14 +429,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.kerberosDescriptorPreConfigurationFileLocation = kerberosDescriptorPreConfigurationFileLocation;
   }
 
-  public String getWidgetsDescriptorFileLocation() {
-    return widgetsDescriptorFileLocation;
-  }
-
-  public void setWidgetsDescriptorFileLocation(String widgetsDescriptorFileLocation) {
-    this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
-  }
-
   /**
    * Set the path of the stack upgrade directory.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
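
The heatmap definitions pair raw metric paths with small arithmetic "value" expressions; for instance, disk usage is derived as ((disk_total - disk_free) / disk_total) * 100. A worked example with made-up sample numbers:

disk_total = 500.0  # GB, sample value
disk_free = 125.0   # GB, sample value

disk_used_pct = ((disk_total - disk_free) / disk_total) * 100
print("Host Disk Space Used %%: %.1f" % disk_used_pct)  # 75.0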

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 4baca5c..25e8d04 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1894,6 +1894,14 @@ public class AmbariMetaInfoTest {
     Assert.assertTrue(descriptor.getService("NEW_SERVICE").shouldPreconfigure());
   }
 
+  @Test
+  public void testGetCommonWidgetsFile() throws AmbariException {
+    File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
+
+    Assert.assertNotNull(widgetsFile);
+    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
+  }
+
   private File getStackRootTmp(String buildDir) {
     return new File(buildDir + "/ambari-metaInfo");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index a02690f..9547271 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2367,18 +2367,14 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
     expect(service.getDesiredStackId()).andReturn(stackId).atLeastOnce();
-    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HDFS", service)
-        .build());
 
     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
 
 
     StackInfo stackInfo = createNiceMock(StackInfo.class);
-    expect(stackInfo.getWidgetsDescriptorFileLocation()).andReturn(null).once();
 
     expect(ambariMetaInfo.getStack("HDP", "2.1")).andReturn(stackInfo).atLeastOnce();
-    expect(ambariMetaInfo.getStack(stackId)).andReturn(stackInfo).atLeastOnce();
+    expect(ambariMetaInfo.getCommonWidgetsDescriptorFile()).andReturn(null).once();
 
     replay(injector, clusters, ambariMetaInfo, stackInfo, cluster, service, repoVersionDAO, repoVersion);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index b370829..7094caa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -34,6 +34,7 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.StringReader;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
@@ -10424,6 +10425,11 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("UPDATED_BLOCKED_TIME", layoutUserWidgetEntities.get(3).getWidget().getWidgetName());
     Assert.assertEquals("HBASE_SUMMARY", layoutUserWidgetEntities.get(0).getWidget().getDefaultSectionName());
 
+    File widgetsFile  = ambariMetaInfo.getCommonWidgetsDescriptorFile();
+    assertNotNull(widgetsFile);
+    assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
+    assertTrue(widgetsFile.exists());
+
     candidateLayoutEntity = null;
     for (WidgetLayoutEntity entity : layoutEntities) {
       if (entity.getLayoutName().equals("default_system_heatmap")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7172655f/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}
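
The value expressions in this removed layout are plain derived percentages; a quick worked check of the disk widget's ${((disk_total-disk_free)/disk_total)*100}, with made-up numbers, in Python:

    disk_total, disk_free = 500.0, 125.0   # illustrative values, in GB
    disk_used_pct = ((disk_total - disk_free) / disk_total) * 100
    assert disk_used_pct == 75.0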


[45/50] [abbrv] ambari git commit: AMBARI-21601 [addendum]. Pre-configure services when Kerberos is enabled to reduce the number of core service restarts when services are added (Eugene Chekanskiy via rlevas)

Posted by jl...@apache.org.
AMBARI-21601 [addendum]. Pre-configure services when Kerberos is enabled to reduce the number of core service restarts when services are added (Eugene Chekanskiy via rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/01b79aae
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/01b79aae
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/01b79aae

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 01b79aae5553dcd89464d8f6558bdba031417a3f
Parents: 8b83a0a
Author: Robert Levas <rl...@hortonworks.com>
Authored: Fri Oct 6 12:26:16 2017 -0400
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Fri Oct 6 12:26:16 2017 -0400

----------------------------------------------------------------------
 .../resources/stacks/HDP/2.6/kerberos_preconfigure.json     | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/01b79aae/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json b/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json
index 9c29393..8460958 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/kerberos_preconfigure.json
@@ -16,6 +16,15 @@
             "local_username": "beacon"
           }
         }
+      ],
+      "configurations": [
+          {
+            "core-site": {
+              "hadoop.proxyuser.beacon.groups": "*",
+              "hadoop.proxyuser.beacon.hosts": "*",
+              "hadoop.proxyuser.beacon.users": "*"
+            }
+        }
       ]
     }
   ]
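
The new "configurations" block pre-seeds the hadoop.proxyuser.beacon.* properties in core-site, so the service can be added later without another core-site change and restart. A minimal sketch of folding such a descriptor fragment into an existing core-site dict (a hypothetical helper, not Ambari's actual merge code; the descriptor's top-level "services" list is assumed, since the hunk shows only a fragment):

    import json

    def apply_preconfigure_core_site(descriptor_path, core_site):
        # Assumes the descriptor is a JSON object whose "services" entries
        # carry the "configurations" list shown in the hunk above.
        with open(descriptor_path) as fp:
            descriptor = json.load(fp)
        for service in descriptor.get("services", []):
            for config in service.get("configurations", []):
                core_site.update(config.get("core-site", {}))
        return core_site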


[30/50] [abbrv] ambari git commit: AMBARI-22141. Intermittent failure of test_stack_advisor_perf

Posted by jl...@apache.org.
AMBARI-22141. Intermittent failure of test_stack_advisor_perf


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0ed128fb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0ed128fb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0ed128fb

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 0ed128fb6403f061e376050fae7ab41e621659f2
Parents: 5370297
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Oct 5 10:36:45 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Oct 5 19:03:32 2017 +0200

----------------------------------------------------------------------
 .../2.2/common/test_stack_advisor_perf.py       | 66 ++++++++++++--------
 1 file changed, 40 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0ed128fb/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor_perf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor_perf.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor_perf.py
index 82cef1b..77dbb66 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor_perf.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor_perf.py
@@ -23,43 +23,57 @@ import imp
 from unittest import TestCase
 from mock.mock import patch
 
-class TestHDP22StackAdvisor(TestCase):
-
-  def instantiate_stack_advisor(self, testDirectory):
-    default_stack_advisor_path = os.path.join(testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
-    hdp_206_stack_advisor_path = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
-    hdp_21_stack_advisor_path = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.1/services/stack_advisor.py')
-    hdp_22_stack_advisor_path = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.2/services/stack_advisor.py')
-    hdp_206_stack_advisor_classname = 'HDP206StackAdvisor'
-
-    with open(default_stack_advisor_path, 'rb') as fp:
-      imp.load_module('stack_advisor', fp, default_stack_advisor_path, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp_206_stack_advisor_path, 'rb') as fp:
-      imp.load_module('stack_advisor_impl', fp, hdp_206_stack_advisor_path, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp_21_stack_advisor_path, 'rb') as fp:
-      imp.load_module('stack_advisor_impl', fp, hdp_21_stack_advisor_path, ('.py', 'rb', imp.PY_SOURCE))
-    with open(hdp_22_stack_advisor_path, 'rb') as fp:
-      stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp_22_stack_advisor_path, ('.py', 'rb', imp.PY_SOURCE))
-    clazz = getattr(stack_advisor_impl, hdp_206_stack_advisor_classname)
+class TestStackAdvisorPerformance(TestCase):
+
+  TIME_ALLOWED = 0.2 # somewhat arbitrary, based on test runs
+
+  def setUp(self):
+    self.testDirectory = os.path.dirname(os.path.abspath(__file__))
+
+  def instantiate_stack_advisor(self):
+    self.load_stack_advisor('main/resources/stacks/stack_advisor.py', 'stack_advisor')
+
+    stack_advisors = (
+      'main/resources/stacks/HDP/2.0.6/services/stack_advisor.py',
+      'main/resources/stacks/HDP/2.1/services/stack_advisor.py',
+      'main/resources/stacks/HDP/2.2/services/stack_advisor.py',
+      'main/resources/stacks/HDP/2.3/services/stack_advisor.py',
+      'main/resources/stacks/HDP/2.4/services/stack_advisor.py',
+      'main/resources/stacks/HDP/2.5/services/stack_advisor.py',
+      'main/resources/stacks/HDP/2.6/services/stack_advisor.py',
+    )
+
+    for filename in stack_advisors:
+      stack_advisor_impl = self.load_stack_advisor(filename, 'stack_advisor_impl')
+
+    current_stack_advisor_classname = 'HDP26StackAdvisor'
+    clazz = getattr(stack_advisor_impl, current_stack_advisor_classname)
     return clazz()
 
+
+  def load_stack_advisor(self, filename, module_name):
+    path = os.path.join(self.testDirectory, '../../../../..', filename)
+    with open(path, 'rb') as fp:
+      return imp.load_module(module_name, fp, path, ('.py', 'rb', imp.PY_SOURCE))
+
+
   @patch('socket.getfqdn')
   def test_performance(self, getfqdn_method):
     getfqdn_method.side_effect = lambda host='perf400-a-1.c.pramod-thangali.internal': host
-    testDirectory = os.path.dirname(os.path.abspath(__file__))
-    current_stack_advisor_path = os.path.join(testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
 
     for folder_name in ['1', '2']:
-      services = json.load(open(os.path.join(testDirectory, folder_name + '/services.json')))
-      hosts = json.load(open(os.path.join(testDirectory, folder_name + '/hosts.json')))
+      with open(os.path.join(self.testDirectory, folder_name, 'services.json')) as fp:
+        services = json.load(fp)
+      with open(os.path.join(self.testDirectory, folder_name, 'hosts.json')) as fp:
+        hosts = json.load(fp)
+
+      stack_advisor = self.instantiate_stack_advisor()
 
-      stack_advisor = self.instantiate_stack_advisor(testDirectory)
       start = time.time()
       recommendation = stack_advisor.recommendComponentLayout(services, hosts)
       time_taken = time.time() - start
-      print "time taken by current stack_advisor.py = " + str(time_taken)
-
-      self.assertTrue(time_taken < 0.1)
+      print "Current stack advisor elapsed {0}, allowed {1}".format(time_taken, TestStackAdvisorPerformance.TIME_ALLOWED)
 
+      self.assertTrue(time_taken < TestStackAdvisorPerformance.TIME_ALLOWED) # Python 2.7: assertLess
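
As the trailing comment notes, Python 2.7's assertLess reports both operands on failure, unlike assertTrue; a drop-in alternative for that final assertion, if 2.6 support is not a concern:

    # assertLess shows time_taken and the budget in the failure message:
    self.assertLess(time_taken, TestStackAdvisorPerformance.TIME_ALLOWED)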
 
 


[11/50] [abbrv] ambari git commit: AMBARI-22118 Log Search UI: implement time range selection from graph. (ababiichuk)

Posted by jl...@apache.org.
AMBARI-22118 Log Search UI: implement time range selection from graph. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1f00c19d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1f00c19d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1f00c19d

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 1f00c19d09ffe9889a6fe59df28c2905a09c1333
Parents: c28b797
Author: ababiichuk <ab...@hortonworks.com>
Authored: Tue Oct 3 16:02:56 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Tue Oct 3 16:35:56 2017 +0300

----------------------------------------------------------------------
 .../src/app/classes/histogram-options.class.ts  | 36 ++++++++
 .../logs-container.component.html               |  4 +-
 .../logs-container/logs-container.component.ts  | 13 ++-
 .../time-histogram.component.less               | 22 +++--
 .../time-histogram/time-histogram.component.ts  | 94 ++++++++++++++++----
 .../src/app/services/filtering.service.ts       | 23 ++---
 6 files changed, 154 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1f00c19d/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.class.ts
new file mode 100644
index 0000000..dee5d98
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.class.ts
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface HistogramMarginOptions {
+  top: number;
+  right: number;
+  bottom: number;
+  left: number;
+}
+
+export interface HistogramStyleOptions {
+  margin?: HistogramMarginOptions;
+  height?: number;
+  tickPadding?: number;
+  columnWidth?: number;
+  dragAreaColor?: string;
+}
+
+export interface HistogramOptions extends HistogramStyleOptions {
+  keysWithColors: {[key: string]: string};
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f00c19d/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.html b/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.html
index 9c6c336..776bb9a 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.html
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.html
@@ -20,7 +20,9 @@
     {{'filter.capture.triggeringRefresh' | translate: autoRefreshMessageParams}}
   </div>
 </div>
-<time-histogram class="col-md-12" [data]="histogramData" [customOptions]="histogramOptions"></time-histogram>
+<time-histogram class="col-md-12" [data]="histogramData" [customOptions]="histogramOptions"
+                svgId="service-logs-histogram"
+                (selectArea)="setCustomTimeRange($event[0], $event[1])"></time-histogram>
 <dropdown-button *ngIf="!isServiceLogsFileView" class="pull-right" label="logs.columns"
                  [options]="availableColumns | async" [isRightAlign]="true" [isMultipleChoice]="true"
                  action="updateSelectedColumns" [additionalArgs]="logsTypeMapObject.fieldsModel"></dropdown-button>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f00c19d/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts
index fd3a58b..7345288 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts
@@ -28,6 +28,7 @@ import {AuditLog} from '@app/models/audit-log.model';
 import {ServiceLog} from '@app/models/service-log.model';
 import {LogField} from '@app/models/log-field.model';
 import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry.class';
+import {HistogramOptions} from '@app/classes/histogram-options.class';
 
 @Component({
   selector: 'logs-container',
@@ -92,9 +93,9 @@ export class LogsContainerComponent implements OnInit {
 
   displayedColumns: any[] = [];
 
-  histogramData: any;
+  histogramData: {[key: string]: number};
 
-  readonly histogramOptions = {
+  readonly histogramOptions: HistogramOptions = {
     keysWithColors: this.logsContainer.colors
   };
 
@@ -116,9 +117,13 @@ export class LogsContainerComponent implements OnInit {
 
   get isServiceLogsFileView(): boolean {
     return this.logsContainer.isServiceLogsFileView;
-  };
+  }
 
   get activeLog(): ActiveServiceLogEntry | null {
     return this.logsContainer.activeLog;
-  };
+  }
+
+  setCustomTimeRange(startTime: number, endTime: number): void {
+    this.filtering.setCustomTimeRange(startTime, endTime);
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f00c19d/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.less b/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.less
index d891862..1d29c55 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.less
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.less
@@ -16,14 +16,24 @@
  * limitations under the License.
  */
 
-/deep/ .axis {
-  .domain {
-    display: none;
-  }
+:host {
+  cursor: crosshair;
 
-  .tick {
-    line {
+  /deep/ .axis {
+    .domain {
       display: none;
     }
+
+    .tick {
+      cursor: default;
+
+      line {
+        display: none;
+      }
+    }
+  }
+
+  /deep/ .value {
+    cursor: pointer;
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f00c19d/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts
index 7856ecc..c3ec388 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts
@@ -16,10 +16,12 @@
  * limitations under the License.
  */
 
-import {Component, OnInit, AfterViewInit, OnChanges, Input, ViewChild, ElementRef} from '@angular/core';
+import {Component, OnInit, AfterViewInit, OnChanges, Input, Output, ViewChild, ElementRef, EventEmitter} from '@angular/core';
+import {ContainerElement, Selection} from 'd3';
 import * as d3 from 'd3';
 import * as moment from 'moment-timezone';
 import {AppSettingsService} from '@app/services/storage/app-settings.service';
+import {HistogramStyleOptions, HistogramOptions} from '@app/classes/histogram-options.class';
 
 @Component({
   selector: 'time-histogram',
@@ -29,14 +31,14 @@ import {AppSettingsService} from '@app/services/storage/app-settings.service';
 export class TimeHistogramComponent implements OnInit, AfterViewInit, OnChanges {
 
   constructor(private appSettings: AppSettingsService) {
-    appSettings.getParameter('timeZone').subscribe(value => {
+    appSettings.getParameter('timeZone').subscribe((value: string): void => {
       this.timeZone = value;
       this.createHistogram();
     });
   }
 
   ngOnInit() {
-    Object.assign(this.options, this.defaultOptions, this.customOptions);
+    this.options = Object.assign({}, this.defaultOptions, this.customOptions);
   }
 
   ngAfterViewInit() {
@@ -52,12 +54,18 @@ export class TimeHistogramComponent implements OnInit, AfterViewInit, OnChanges
   element: ElementRef;
 
   @Input()
-  customOptions: any;
+  svgId: string;
 
   @Input()
-  data: any;
+  customOptions: HistogramOptions;
 
-  private readonly defaultOptions = {
+  @Input()
+  data: {[key: string]: number};
+
+  @Output()
+  selectArea: EventEmitter<number[]> = new EventEmitter();
+
+  private readonly defaultOptions: HistogramStyleOptions = {
     margin: {
       top: 20,
       right: 20,
@@ -66,10 +74,11 @@ export class TimeHistogramComponent implements OnInit, AfterViewInit, OnChanges
     },
     height: 200,
     tickPadding: 10,
-    columnWidth: 20
+    columnWidth: 20,
+    dragAreaColor: '#FFF'
   };
 
-  private options: any = {};
+  private options: HistogramOptions;
 
   private timeZone: string;
 
@@ -77,7 +86,7 @@ export class TimeHistogramComponent implements OnInit, AfterViewInit, OnChanges
 
   private svg;
 
-  private width;
+  private width: number;
 
   private xScale;
 
@@ -91,6 +100,16 @@ export class TimeHistogramComponent implements OnInit, AfterViewInit, OnChanges
 
   private htmlElement: HTMLElement;
 
+  private dragArea: Selection<SVGGraphicsElement, undefined, SVGGraphicsElement, undefined>;
+
+  private dragStartX: number;
+
+  private minDragX: number;
+
+  private maxDragX: number;
+
+  private readonly timeFormat: string = 'MM/DD HH:mm';
+
   histogram: any;
 
   private createHistogram(): void {
@@ -105,7 +124,7 @@ export class TimeHistogramComponent implements OnInit, AfterViewInit, OnChanges
     const margin = this.options.margin,
       keysWithColors = this.options.keysWithColors,
       keys = Object.keys(keysWithColors),
-      colors = keys.reduce((array, key) => [...array, keysWithColors[key]], []);
+      colors = keys.reduce((array: string[], key: string): string[] => [...array, keysWithColors[key]], []);
     this.width = this.htmlElement.clientWidth - margin.left - margin.right;
     this.xScale = d3.scaleTime().range([0, this.width]);
     this.yScale = d3.scaleLinear().range([this.options.height, 0]);
@@ -115,20 +134,20 @@ export class TimeHistogramComponent implements OnInit, AfterViewInit, OnChanges
   private buildSVG(): void {
     const margin = this.options.margin;
     this.host.html('');
-    this.svg = this.host.append('svg').attr('width', this.width + margin.left + margin.right)
+    this.svg = this.host.append('svg').attr('id', this.svgId).attr('width', this.htmlElement.clientWidth)
       .attr('height', this.options.height + margin.top + margin.bottom).append('g')
       .attr('transform', `translate(${margin.left},${margin.top})`);
   }
 
   private drawXAxis(): void {
     this.xAxis = d3.axisBottom(this.xScale)
-      .tickFormat(tick => moment(tick).tz(this.timeZone).format('MM/DD HH:mm'))
+      .tickFormat(tick => moment(tick).tz(this.timeZone).format(this.timeFormat))
       .tickPadding(this.options.tickPadding);
     this.svg.append('g').attr('class', 'axis').attr('transform', `translate(0,${this.options.height})`).call(this.xAxis);
   }
 
   private drawYAxis(): void {
-    this.yAxis = d3.axisLeft(this.yScale).tickFormat((tick: number) => {
+    this.yAxis = d3.axisLeft(this.yScale).tickFormat((tick: number): string | undefined => {
       if (Number.isInteger(tick)) {
         return tick.toFixed(0);
       } else {
@@ -142,20 +161,61 @@ export class TimeHistogramComponent implements OnInit, AfterViewInit, OnChanges
     const keys = Object.keys(this.options.keysWithColors),
       data = this.data,
       timeStamps = Object.keys(data),
-      formattedData = timeStamps.map(timeStamp => Object.assign({
-        timeStamp: timeStamp
+      formattedData = timeStamps.map((timeStamp: string): {[key: string]: number} => Object.assign({
+        timeStamp: Number(timeStamp)
       }, data[timeStamp])),
       layers = (d3.stack().keys(keys)(formattedData)),
       columnWidth = this.options.columnWidth;
     this.xScale.domain(d3.extent(formattedData, item => item.timeStamp));
-    this.yScale.domain([0, d3.max(formattedData, item => keys.reduce((sum, key) => sum + item[key], 0))]);
+    this.yScale.domain([0, d3.max(formattedData, item => keys.reduce((sum: number, key: string): number => sum + item[key], 0))]);
     this.drawXAxis();
     this.drawYAxis();
-    const layer = this.svg.selectAll().data(d3.transpose<any>(layers)).enter().append('g');
+    const layer = this.svg.selectAll().data(d3.transpose<any>(layers)).enter().append('g').attr('class', 'value');
     layer.selectAll().data(item => item).enter().append('rect')
       .attr('x', item => this.xScale(item.data.timeStamp) - columnWidth / 2).attr('y', item => this.yScale(item[1]))
       .attr('height', item => this.yScale(item[0]) - this.yScale(item[1])).attr('width', columnWidth.toString())
       .style('fill', (item, index) => this.colorScale(index));
+    this.setDragBehavior();
+  }
+
+  private setDragBehavior(): void {
+    this.minDragX = this.options.margin.left;
+    this.maxDragX = this.htmlElement.clientWidth;
+    d3.selectAll(`svg#${this.svgId}`).call(d3.drag()
+      .on('start', (datum: undefined, index: number, containers: ContainerElement[]): void => {
+        if (this.dragArea) {
+          this.dragArea.remove();
+        }
+        this.dragStartX = Math.max(0, this.getDragX(containers[0]) - this.options.margin.left);
+        this.dragArea = this.svg.insert('rect', ':first-child').attr('x', this.dragStartX).attr('y', 0).attr('width', 0)
+          .attr('height', this.options.height).style('fill', this.options.dragAreaColor);
+      })
+      .on('drag', (datum: undefined, index: number, containers: ContainerElement[]): void => {
+        const currentX = Math.max(this.getDragX(containers[0]), this.minDragX) - this.options.margin.left,
+          startX = Math.min(currentX, this.dragStartX),
+          currentWidth = Math.abs(currentX - this.dragStartX);
+        this.dragArea.attr('x', startX).attr('width', currentWidth);
+      })
+      .on('end', (): void => {
+        const dragAreaDetails = this.dragArea.node().getBBox(),
+          startX = Math.max(0, dragAreaDetails.x),
+          endX = Math.min(this.width, dragAreaDetails.x + dragAreaDetails.width),
+          xScaleInterval = this.xScale.domain().map((point: Date): number => point.valueOf()),
+          xScaleLength = xScaleInterval[1] - xScaleInterval[0],
+          ratio = xScaleLength / this.width,
+          startTimeStamp = Math.round(xScaleInterval[0] + ratio * startX),
+          endTimeStamp = Math.round(xScaleInterval[0] + ratio * endX);
+        this.selectArea.emit([startTimeStamp, endTimeStamp]);
+        this.dragArea.remove();
+      })
+    );
+    d3.selectAll(`svg#${this.svgId} .value, svg#${this.svgId} .axis`).call(d3.drag().on('start', (): void => {
+      d3.event.sourceEvent.stopPropagation();
+    }));
+  }
+
+  private getDragX(element: ContainerElement): number {
+    return d3.mouse(element)[0];
   }
 
 }
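
The 'end' drag callback above recovers timestamps from pixel coordinates by linear interpolation over the x-scale's domain. The same arithmetic, restated as a small Python sketch (names are illustrative, not the component's API):

    def pixels_to_time_range(start_x, end_x, domain_start_ms, domain_end_ms, width):
        # ratio = milliseconds represented by one pixel of chart width
        ratio = (domain_end_ms - domain_start_ms) / float(width)
        return (int(round(domain_start_ms + ratio * start_x)),
                int(round(domain_start_ms + ratio * end_x)))

    # e.g. a 100px selection starting at x=200 on a 1000px chart spanning
    # one hour selects a 6-minute window beginning 12 minutes in.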

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f00c19d/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
index 6697c54..0fff75d 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
@@ -22,7 +22,6 @@ import {Subject} from 'rxjs/Subject';
 import {Observable} from 'rxjs/Observable';
 import 'rxjs/add/observable/timer';
 import 'rxjs/add/operator/takeUntil';
-import {Moment} from 'moment';
 import * as moment from 'moment-timezone';
 import {ListItem} from '@app/classes/list-item.class';
 import {AppSettingsService} from '@app/services/storage/app-settings.service';
@@ -400,29 +399,25 @@ export class FilteringService {
 
   autoRefreshRemainingSeconds: number = 0;
 
-  private startCaptureMoment: Moment;
+  private startCaptureTime: number;
 
-  private stopCaptureMoment: Moment;
+  private stopCaptureTime: number;
 
   startCaptureTimer(): void {
-    this.startCaptureMoment = moment();
+    this.startCaptureTime = new Date().valueOf();
     Observable.timer(0, 1000).takeUntil(this.stopTimer).subscribe(seconds => this.captureSeconds = seconds);
   }
 
   stopCaptureTimer(): void {
     const autoRefreshIntervalSeconds = this.autoRefreshInterval / 1000;
-    this.stopCaptureMoment = moment();
+    this.stopCaptureTime = new Date().valueOf();
     this.captureSeconds = 0;
     this.stopTimer.next();
     Observable.timer(0, 1000).takeUntil(this.stopAutoRefreshCountdown).subscribe(seconds => {
       this.autoRefreshRemainingSeconds = autoRefreshIntervalSeconds - seconds;
       if (!this.autoRefreshRemainingSeconds) {
         this.stopAutoRefreshCountdown.next();
-        this.filtersForm.controls.timeRange.setValue({
-          type: 'CUSTOM',
-          start: this.startCaptureMoment,
-          end: this.stopCaptureMoment
-        });
+        this.setCustomTimeRange(this.startCaptureTime, this.stopCaptureTime);
       }
     });
   }
@@ -457,6 +452,14 @@ export class FilteringService {
     });
   }
 
+  setCustomTimeRange(startTime: number, endTime: number): void {
+    this.filtersForm.controls.timeRange.setValue({
+      type: 'CUSTOM',
+      start: moment(startTime),
+      end: moment(endTime)
+    });
+  }
+
   private getStartTime = (value: any, current: string): string => {
     let time;
     if (value) {


[27/50] [abbrv] ambari git commit: AMBARI-22127. Installation of stack selector fails on Debian

Posted by jl...@apache.org.
AMBARI-22127. Installation of stack selector fails on Debian


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2512dc83
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2512dc83
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2512dc83

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 2512dc83c497b96511c1faf901753ad3d9f59877
Parents: b042182
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Wed Oct 4 11:53:05 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Oct 5 16:16:25 2017 +0200

----------------------------------------------------------------------
 .../resource_management/TestPackageResource.py  |  4 +-
 .../core/providers/package/__init__.py          |  4 +-
 .../core/providers/package/apt.py               | 35 ++++++++------
 .../core/providers/package/choco.py             |  8 ++--
 .../core/providers/package/yumrpm.py            |  8 ++--
 .../core/providers/package/zypper.py            |  7 +--
 .../core/resources/packaging.py                 | 14 +++++-
 .../libraries/functions/repository_util.py      |  8 +++-
 .../custom_actions/scripts/install_packages.py  | 12 ++++-
 .../custom_actions/TestInstallPackages.py       | 50 ++++++++++++--------
 10 files changed, 96 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-agent/src/test/python/resource_management/TestPackageResource.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestPackageResource.py b/ambari-agent/src/test/python/resource_management/TestPackageResource.py
index 49e3adf..b10fae8 100644
--- a/ambari-agent/src/test/python/resource_management/TestPackageResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestPackageResource.py
@@ -181,13 +181,13 @@ class TestPackageResource(TestCase):
   def test_action_install_use_repos_rhel(self, shell_mock):
     shell_mock.return_value = (0,'')
     with Environment('/') as env:
-      Package("some_package", use_repos=['HDP-UTILS-2.2.0.1-885', 'HDP-2.2.0.1-885'],
+      Package("some_package", use_repos={'HDP-UTILS-2.2.0.1-885': 'ambari-hdp-1', 'HDP-2.2.0.1-885': 'ambari-hdp-1'},
               logoutput = False
               )
     self.assertEquals(shell_mock.call_args[0][0],
                       ['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'install',
                        '--disablerepo=*',
-                       '--enablerepo=HDP-UTILS-2.2.0.1-885,HDP-2.2.0.1-885', 'some_package'])
+                       '--enablerepo=HDP-2.2.0.1-885,HDP-UTILS-2.2.0.1-885', 'some_package'])
 
   @patch.object(shell, "call", new = MagicMock(return_value=(0, None)))
   @patch.object(shell, "checked_call")

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py b/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py
index 9404757..8728b5e 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py
@@ -39,13 +39,13 @@ class PackageProvider(Provider):
   def __init__(self, *args, **kwargs):
     super(PackageProvider, self).__init__(*args, **kwargs)   
   
-  def install_package(self, name, use_repos=set(), skip_repos=set(), is_upgrade=False):
+  def install_package(self, name, use_repos={}, skip_repos=set(), is_upgrade=False):
     raise NotImplementedError()
 
   def remove_package(self, name, ignore_dependencies=False):
     raise NotImplementedError()
 
-  def upgrade_package(self, name, use_repos=set(), skip_repos=set(), is_upgrade=True):
+  def upgrade_package(self, name, use_repos={}, skip_repos=set(), is_upgrade=True):
     raise NotImplementedError()
 
   def action_install(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-common/src/main/python/resource_management/core/providers/package/apt.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/apt.py b/ambari-common/src/main/python/resource_management/core/providers/package/apt.py
index f6a5538..e236697 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/apt.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/apt.py
@@ -44,6 +44,7 @@ REMOVE_CMD = {
 }
 REPO_UPDATE_CMD = ['/usr/bin/apt-get', 'update','-qq']
 
+EMPTY_FILE = "/dev/null"
 APT_SOURCES_LIST_DIR = "/etc/apt/sources.list.d"
 
 CHECK_CMD = "dpkg --get-selections | grep -v deinstall | awk '{print $1}' | grep ^%s$"
@@ -338,38 +339,44 @@ class AptProvider(PackageProvider):
     return True
 
   @replace_underscores
-  def install_package(self, name, use_repos=set(), skip_repos=set(), is_upgrade=False):
+  def install_package(self, name, use_repos={}, skip_repos=set(), is_upgrade=False):
     if is_upgrade or use_repos or not self._check_existence(name):
       cmd = INSTALL_CMD[self.get_logoutput()]
       copied_sources_files = []
       is_tmp_dir_created = False
       if use_repos:
-        is_tmp_dir_created = True
-        apt_sources_list_tmp_dir = tempfile.mkdtemp(suffix="-ambari-apt-sources-d")
-        Logger.info("Temporal sources directory was created: %s" % apt_sources_list_tmp_dir)
-        if 'base' not in use_repos:
+        if 'base' in use_repos:
+          use_repos = set([v for k,v in use_repos.items() if k != 'base'])
+        else:
           cmd = cmd + ['-o', 'Dir::Etc::SourceList=%s' % EMPTY_FILE]
-        for repo in use_repos:
-          if repo != 'base':
+          use_repos = set(use_repos.values())
+
+        if use_repos:
+          is_tmp_dir_created = True
+          apt_sources_list_tmp_dir = tempfile.mkdtemp(suffix="-ambari-apt-sources-d")
+          Logger.info("Temporary sources directory was created: %s" % apt_sources_list_tmp_dir)
+
+          for repo in use_repos:
             new_sources_file = os.path.join(apt_sources_list_tmp_dir, repo + '.list')
-            Logger.info("Temporal sources file will be copied: %s" % new_sources_file)
+            Logger.info("Temporary sources file will be copied: %s" % new_sources_file)
             sudo.copy(os.path.join(APT_SOURCES_LIST_DIR, repo + '.list'), new_sources_file)
             copied_sources_files.append(new_sources_file)
-        cmd = cmd + ['-o', 'Dir::Etc::SourceParts=%s' % apt_sources_list_tmp_dir]
+          cmd = cmd + ['-o', 'Dir::Etc::SourceParts=%s' % apt_sources_list_tmp_dir]
 
       cmd = cmd + [name]
       Logger.info("Installing package %s ('%s')" % (name, string_cmd_from_args_list(cmd)))
       self.checked_call_with_retries(cmd, sudo=True, env=INSTALL_CMD_ENV, logoutput=self.get_logoutput())
 
       if is_tmp_dir_created:
-        for temporal_sources_file in copied_sources_files:
-          Logger.info("Removing temporal sources file: %s" % temporal_sources_file)
-          os.remove(temporal_sources_file)
-        Logger.info("Removing temporal sources directory: %s" % apt_sources_list_tmp_dir)
+        for temporary_sources_file in copied_sources_files:
+          Logger.info("Removing temporary sources file: %s" % temporary_sources_file)
+          os.remove(temporary_sources_file)
+        Logger.info("Removing temporary sources directory: %s" % apt_sources_list_tmp_dir)
         os.rmdir(apt_sources_list_tmp_dir)
     else:
       Logger.info("Skipping installation of existing package %s" % (name))
       
+
   def is_locked_output(self, out):
     return "Unable to lock the administration directory" in out
 
@@ -380,7 +387,7 @@ class AptProvider(PackageProvider):
     return REPO_UPDATE_CMD
 
   @replace_underscores
-  def upgrade_package(self, name, use_repos=set(), skip_repos=set(), is_upgrade=True):
+  def upgrade_package(self, name, use_repos={}, skip_repos=set(), is_upgrade=True):
     return self.install_package(name, use_repos, skip_repos, is_upgrade)
 
   @replace_underscores
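
Worth spelling out: apt has no --enablerepo equivalent, so the provider hides the default sources by pointing Dir::Etc::SourceList at /dev/null (EMPTY_FILE above) and exposes only the wanted .list files through a temporary Dir::Etc::SourceParts directory. A condensed sketch of the same idea (flags abbreviated; cleanup left to the caller):

    import os, shutil, tempfile

    def restricted_apt_install_cmd(package, repo_file_names,
                                   sources_dir='/etc/apt/sources.list.d'):
        tmp_dir = tempfile.mkdtemp(suffix='-ambari-apt-sources-d')
        for name in repo_file_names:
            # copy only the selected repo definitions into the temp dir
            shutil.copy(os.path.join(sources_dir, name + '.list'), tmp_dir)
        cmd = ['/usr/bin/apt-get', 'install', '-y',
               '-o', 'Dir::Etc::SourceList=/dev/null',
               '-o', 'Dir::Etc::SourceParts=%s' % tmp_dir,
               package]
        return cmd, tmp_dir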

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-common/src/main/python/resource_management/core/providers/package/choco.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/choco.py b/ambari-common/src/main/python/resource_management/core/providers/package/choco.py
index db55296..b058f42 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/choco.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/choco.py
@@ -46,11 +46,11 @@ CHECK_CMD = {
 }
 
 class ChocoProvider(PackageProvider):
-  def install_package(self, name, use_repos=[], skip_repos=[]):
+  def install_package(self, name, use_repos={}, skip_repos=[]):
     if not self._check_existence(name) or use_repos:
       cmd = INSTALL_CMD[self.get_logoutput()]
       if use_repos:
-        enable_repo_option = '-s' + ",".join(use_repos)
+        enable_repo_option = '-s' + ",".join(sorted(use_repos.keys()))
         cmd = cmd + [enable_repo_option]
       cmd = cmd + [name]
       cmdString = " ".join(cmd)
@@ -62,10 +62,10 @@ class ChocoProvider(PackageProvider):
     else:
       Logger.info("Skipping installation of existing package %s" % (name))
 
-  def upgrade_package(self, name, use_repos=[], skip_repos=[]):
+  def upgrade_package(self, name, use_repos={}, skip_repos=[]):
     cmd = UPGRADE_CMD[self.get_logoutput()]
     if use_repos:
-      enable_repo_option = '-s' + ",".join(use_repos)
+      enable_repo_option = '-s' + ",".join(sorted(use_repos.keys()))
       cmd = cmd + [enable_repo_option]
     cmd = cmd + [name]
     cmdString = " ".join(cmd)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
index a4a55da..b9b6792 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
@@ -211,11 +211,11 @@ class YumProvider(RPMBasedPackageProvider):
 
     return True
 
-  def install_package(self, name, use_repos=set(), skip_repos=set(), is_upgrade=False):
+  def install_package(self, name, use_repos={}, skip_repos=set(), is_upgrade=False):
     if is_upgrade or use_repos or not self._check_existence(name):
       cmd = INSTALL_CMD[self.get_logoutput()]
       if use_repos:
-        enable_repo_option = '--enablerepo=' + ",".join(use_repos)
+        enable_repo_option = '--enablerepo=' + ",".join(sorted(use_repos.keys()))
         disable_repo_option = '--disablerepo=' + "*" if len(skip_repos) == 0 else ','.join(skip_repos)
         cmd = cmd + [disable_repo_option, enable_repo_option]
       cmd = cmd + [name]
@@ -224,7 +224,7 @@ class YumProvider(RPMBasedPackageProvider):
     else:
       Logger.info("Skipping installation of existing package %s" % (name))
 
-  def upgrade_package(self, name, use_repos=set(), skip_repos=set(), is_upgrade=True):
+  def upgrade_package(self, name, use_repos={}, skip_repos=set(), is_upgrade=True):
     return self.install_package(name, use_repos, skip_repos, is_upgrade)
 
   def remove_package(self, name, ignore_dependencies=False):
@@ -321,4 +321,4 @@ class YumProvider(RPMBasedPackageProvider):
           if mirror in mirrors:
             repo_ids.append(section)
 
-    return set(repo_ids)
\ No newline at end of file
+    return set(repo_ids)
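
The sorted() in the new code is what makes the --enablerepo list deterministic across dict iteration orders, which is why the expected string in TestPackageResource above changed:

    use_repos = {'HDP-UTILS-2.2.0.1-885': 'ambari-hdp-1',
                 'HDP-2.2.0.1-885': 'ambari-hdp-1'}
    # '2' sorts before 'U', so HDP-2.2... comes first:
    assert ','.join(sorted(use_repos.keys())) == 'HDP-2.2.0.1-885,HDP-UTILS-2.2.0.1-885'

One note in passing on the unchanged disable_repo_option line above: Python's conditional expression binds lower than +, so '--disablerepo=' + "*" if len(skip_repos) == 0 else ','.join(skip_repos) evaluates to ','.join(skip_repos) without the '--disablerepo=' prefix whenever skip_repos is non-empty; that looks worth a follow-up.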

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py b/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
index 5b8e5ab..6fc4b59 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
@@ -177,9 +177,10 @@ class ZypperProvider(RPMBasedPackageProvider):
 
     return True
 
-  def install_package(self, name, use_repos=[], skip_repos=[], is_upgrade=False):
+  def install_package(self, name, use_repos={}, skip_repos=[], is_upgrade=False):
     if is_upgrade or use_repos or not self._check_existence(name):
       cmd = INSTALL_CMD[self.get_logoutput()]
+      use_repos = use_repos.keys()
       if use_repos:
         active_base_repos = self.get_active_base_repos()
         if 'base' in use_repos:
@@ -187,7 +188,7 @@ class ZypperProvider(RPMBasedPackageProvider):
           use_repos = filter(lambda x: x != 'base', use_repos)
           use_repos.extend(active_base_repos)
         use_repos_options = []
-        for repo in use_repos:
+        for repo in sorted(use_repos):
           use_repos_options = use_repos_options + ['--repo', repo]
         cmd = cmd + use_repos_options
 
@@ -197,7 +198,7 @@ class ZypperProvider(RPMBasedPackageProvider):
     else:
       Logger.info("Skipping installation of existing package %s" % (name))
 
-  def upgrade_package(self, name, use_repos=[], skip_repos=[], is_upgrade=True):
+  def upgrade_package(self, name, use_repos={}, skip_repos=[], is_upgrade=True):
     return self.install_package(name, use_repos, skip_repos, is_upgrade)
   
   def remove_package(self, name):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-common/src/main/python/resource_management/core/resources/packaging.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/resources/packaging.py b/ambari-common/src/main/python/resource_management/core/resources/packaging.py
index e3adc30..58488b5 100644
--- a/ambari-common/src/main/python/resource_management/core/resources/packaging.py
+++ b/ambari-common/src/main/python/resource_management/core/resources/packaging.py
@@ -30,9 +30,19 @@ class Package(Resource):
   package_name = ResourceArgument(default=lambda obj: obj.name)
   location = ResourceArgument(default=lambda obj: obj.package_name)
 
-  # Allow using only specific list of repositories when performing action
-  use_repos = ResourceArgument(default=[])
+  """
+  Dictionary of repositories (repo ID => repo file name) to allow using
+  only a specific list of repositories when performing action.
+  (APT requires repo file names while other providers can filter by repo ID,
+  hence the need to pass both.)
+  """
+  use_repos = ResourceArgument(default={})
+
+  """
+  List of repositories to avoid using (currently only respected by YUM provider)
+  """
   skip_repos = ResourceArgument(default=[])
+
   """
   True           -  log it in INFO mode
   False          -  never log it
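
A usage sketch matching the updated TestPackageResource expectation above, with the new dict form mapping each repo ID to the repo file that defines it:

    Package("some_package",
            use_repos={'HDP-UTILS-2.2.0.1-885': 'ambari-hdp-1',
                       'HDP-2.2.0.1-885': 'ambari-hdp-1'},
            logoutput=False)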

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
index f1074ee..7ad7df0 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/repository_util.py
@@ -33,7 +33,7 @@ def create_repo_files(template, command_repository):
   """
   Creates repositories in a consistent manner for all types
   :param command_repository: a CommandRepository instance
-  :return:
+  :return: a dictionary with repo ID => repo file name mapping
   """
 
   if command_repository.version_id is None:
@@ -43,7 +43,7 @@ def create_repo_files(template, command_repository):
     Logger.warning(
       "Repository for {0}/{1} has no repositories.  Ambari may not be managing this version.".format(
         command_repository.stack_name, command_repository.version_string))
-    return
+    return {}
 
   # add the stack name to the file name just to make it a little easier to debug
   # version_id is the primary id of the repo_version table in the database
@@ -51,6 +51,7 @@ def create_repo_files(template, command_repository):
                                       command_repository.version_id)
 
   append_to_file = False  # initialize to False to create the file anew.
+  repo_files = {}
 
   for repository in command_repository.repositories:
 
@@ -71,6 +72,9 @@ def create_repo_files(template, command_repository):
                  components = repository.ubuntu_components,
                  append_to_file = append_to_file)
       append_to_file = True
+      repo_files[repository.repo_id] = file_name
+
+  return repo_files
 
 
 def _find_value(dictionary, key):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
index c5e4ae7..872d55e 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
@@ -58,6 +58,8 @@ class InstallPackages(Script):
     super(InstallPackages, self).__init__()
 
     self.pkg_provider = get_provider("Package")
+    self.repo_files = {}
+
 
   def actionexecute(self, env):
     num_errors = 0
@@ -108,7 +110,8 @@ class InstallPackages(Script):
       else:
         Logger.info(
           "Will install packages for repository version {0}".format(self.repository_version))
-        create_repo_files(template, command_repository)
+        new_repo_files = create_repo_files(template, command_repository)
+        self.repo_files.update(new_repo_files)
     except Exception, err:
       Logger.logger.exception("Cannot install repository files. Error: {0}".format(str(err)))
       num_errors += 1
@@ -326,9 +329,14 @@ class InstallPackages(Script):
       # patches installed
       repositories = config['repositoryFile']['repositories']
       repository_ids = [repository['repoId'] for repository in repositories]
+      repos_to_use = {}
+      for repo_id in repository_ids:
+        if repo_id in self.repo_files:
+          repos_to_use[repo_id] = self.repo_files[repo_id]
+
       Package(stack_selector_package,
         action="upgrade",
-        use_repos=repository_ids,
+        use_repos=repos_to_use,
         retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
         retry_count=agent_stack_retry_count)
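
The filtering loop above has a compact equivalent, shown here only as an option (the commit keeps the explicit loop, which reads fine either way):

    repos_to_use = dict((repo_id, self.repo_files[repo_id])
                        for repo_id in repository_ids
                        if repo_id in self.repo_files)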
       

http://git-wip-us.apache.org/repos/asf/ambari/blob/2512dc83/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
index 0f303be..3a2fc98 100644
--- a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
+++ b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
@@ -109,6 +109,8 @@ class TestInstallPackages(RMFTestCase):
       lookup_packages.side_effect = TestInstallPackages._add_packages_lookUpYum
       get_provider.return_value = provider
       list_ambari_managed_repos_mock.return_value=[]
+      repo_file_name = 'ambari-hdp-1'
+      use_repos = { 'HDP-UTILS-1.1.0.20': repo_file_name, 'HDP-2.2': repo_file_name }
       self.executeScript("scripts/install_packages.py",
                          classname="InstallPackages",
                          command="actionexecute",
@@ -126,7 +128,7 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP-UTILS', 'main'],
                                 repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-1',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=False,
       )
@@ -135,11 +137,11 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP', 'main'],
                                 repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-1',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=True,
       )
-      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=['HDP-UTILS-1.1.0.20', 'HDP-2.2'], retry_count=5, retry_on_repo_unavailability=False)
+      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=use_repos, retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
@@ -190,7 +192,7 @@ class TestInstallPackages(RMFTestCase):
                          'repository_version_id': 1,
                          'actual_version': VERSION_STUB})
 
-      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=[], retry_count=5, retry_on_repo_unavailability=False)
+      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos={}, retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', None, action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
@@ -224,6 +226,8 @@ class TestInstallPackages(RMFTestCase):
       lookup_packages.side_effect = TestInstallPackages._add_packages_available
       get_provider.return_value = provider
       list_ambari_managed_repos_mock.return_value=[]
+      repo_file_name = 'ambari-hdp-1'
+      use_repos = { 'HDP-UTILS-1.1.0.20': repo_file_name, 'HDP-2.2': repo_file_name }
       self.executeScript("scripts/install_packages.py",
                          classname="InstallPackages",
                          command="actionexecute",
@@ -241,7 +245,7 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP-UTILS', 'main'],
                                 repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-1',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=False,
                                 )
@@ -250,11 +254,11 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP', 'main'],
                                 repo_template=u'[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-1',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=True,
                                 )
-      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=['HDP-UTILS-1.1.0.20', 'HDP-2.2'], retry_count=5, retry_on_repo_unavailability=False)
+      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=use_repos, retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
@@ -289,6 +293,8 @@ class TestInstallPackages(RMFTestCase):
       get_provider.return_value = provider
       list_ambari_managed_repos_mock.return_value=["HDP-UTILS-2.2.0.1-885"]
       is_redhat_family_mock.return_value = True
+      repo_file_name = 'ambari-hdp-1'
+      use_repos = { 'HDP-UTILS-1.1.0.20': repo_file_name, 'HDP-2.2': repo_file_name }
       self.executeScript("scripts/install_packages.py",
                          classname="InstallPackages",
                          command="actionexecute",
@@ -306,7 +312,7 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP-UTILS', 'main'],
                                 repo_template=u'[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name='ambari-hdp-1',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=False,
       )
@@ -315,11 +321,11 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP', 'main'],
                                 repo_template=u'[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-1',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=True,
       )
-      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=['HDP-UTILS-1.1.0.20', 'HDP-2.2'], retry_count=5, retry_on_repo_unavailability=False)
+      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=use_repos, retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
@@ -434,6 +440,8 @@ class TestInstallPackages(RMFTestCase):
 
       get_provider.return_value = provider
       is_suse_family_mock.return_value = True
+      repo_file_name = 'ambari-hdp-1'
+      use_repos = { 'HDP-UTILS-1.1.0.20': repo_file_name, 'HDP-2.2': repo_file_name }
       self.executeScript("scripts/install_packages.py",
                          classname="InstallPackages",
                          command="actionexecute",
@@ -451,7 +459,7 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP-UTILS', 'main'],
                                 repo_template=u'[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-1',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=False,
                                 )
@@ -460,11 +468,11 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP', 'main'],
                                 repo_template=u'[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-1',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=True,
                                 )
-      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=['HDP-UTILS-1.1.0.20', 'HDP-2.2'], retry_count=5, retry_on_repo_unavailability=False)
+      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=use_repos, retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
@@ -509,6 +517,8 @@ class TestInstallPackages(RMFTestCase):
 
       command_json['repositoryFile']['repoVersion'] = '2.2.0.1-990'
 
+      repo_file_name = 'ambari-hdp-4'
+      use_repos = { 'HDP-UTILS-1.1.0.20-repo-4': repo_file_name, 'HDP-2.2-repo-4': repo_file_name }
       self.executeScript("scripts/install_packages.py",
                          classname="InstallPackages",
                          command="actionexecute",
@@ -526,7 +536,7 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP-UTILS', 'main'],
                                 repo_template=u'[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-4',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=False,
                                 )
@@ -535,11 +545,11 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP', 'main'],
                                 repo_template=u'[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-4',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=True,
                                 )
-      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=['HDP-UTILS-1.1.0.20-repo-4', 'HDP-2.2-repo-4'], retry_count=5, retry_on_repo_unavailability=False)
+      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=use_repos, retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5,
                                 retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
@@ -1146,6 +1156,8 @@ class TestInstallPackages(RMFTestCase):
       lookup_packages.side_effect = TestInstallPackages._add_packages_lookUpYum
       get_provider.return_value = provider
       list_ambari_managed_repos_mock.return_value=[]
+      repo_file_name = 'ambari-hdp-4'
+      use_repos = { 'HDP-UTILS-1.1.0.20-repo-4': repo_file_name, 'HDP-2.2-repo-4': repo_file_name }
       self.executeScript("scripts/install_packages.py",
                          classname="InstallPackages",
                          command="actionexecute",
@@ -1164,7 +1176,7 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP-UTILS', 'main'],
                                 repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-4',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=False,
       )
@@ -1173,11 +1185,11 @@ class TestInstallPackages(RMFTestCase):
                                 action=['create'],
                                 components=[u'HDP', 'main'],
                                 repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0',
-                                repo_file_name=u'ambari-hdp-4',
+                                repo_file_name=repo_file_name,
                                 mirror_list=None,
                                 append_to_file=True,
       )
-      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=['HDP-UTILS-1.1.0.20-repo-4', 'HDP-2.2-repo-4'], retry_count=5, retry_on_repo_unavailability=False)
+      self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], use_repos=use_repos, retry_count=5, retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5,
                                 retry_on_repo_unavailability=False)
       self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)


[49/50] [abbrv] ambari git commit: AMBARI-21776. Move druid version to druid 0.10.1 and drop TP flag. (Slim Bouguerra via Swapan Shridhar).

Posted by jl...@apache.org.
AMBARI-21776. Move druid version to druid 0.10.1 and drop TP flag. (Slim Bouguerra via Swapan Shridhar).


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e61556cc
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e61556cc
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e61556cc

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: e61556cc28c593ce9ab58274302b5de4982601d1
Parents: 2fb4649
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Fri Oct 6 11:05:04 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Fri Oct 6 11:05:04 2017 -0700

----------------------------------------------------------------------
 .../DRUID/0.10.1/configuration/druid-broker.xml | 106 +++++++
 .../DRUID/0.10.1/configuration/druid-common.xml | 270 ++++++++++++++++
 .../0.10.1/configuration/druid-coordinator.xml  |  43 +++
 .../DRUID/0.10.1/configuration/druid-env.xml    | 248 +++++++++++++++
 .../0.10.1/configuration/druid-historical.xml   |  94 ++++++
 .../DRUID/0.10.1/configuration/druid-log4j.xml  |  84 +++++
 .../0.10.1/configuration/druid-logrotate.xml    |  68 ++++
 .../configuration/druid-middlemanager.xml       | 122 ++++++++
 .../0.10.1/configuration/druid-overlord.xml     |  52 ++++
 .../DRUID/0.10.1/configuration/druid-router.xml |  59 ++++
 .../common-services/DRUID/0.10.1/metainfo.xml   | 223 ++++++++++++++
 .../DRUID/0.10.1/package/scripts/broker.py      |  28 ++
 .../DRUID/0.10.1/package/scripts/coordinator.py |  28 ++
 .../DRUID/0.10.1/package/scripts/druid.py       | 307 +++++++++++++++++++
 .../DRUID/0.10.1/package/scripts/druid_node.py  | 114 +++++++
 .../DRUID/0.10.1/package/scripts/historical.py  |  28 ++
 .../0.10.1/package/scripts/middlemanager.py     |  28 ++
 .../DRUID/0.10.1/package/scripts/overlord.py    |  28 ++
 .../DRUID/0.10.1/package/scripts/params.py      | 200 ++++++++++++
 .../DRUID/0.10.1/package/scripts/router.py      |  28 ++
 .../0.10.1/package/scripts/service_check.py     |  44 +++
 .../0.10.1/package/scripts/status_params.py     |  24 ++
 .../DRUID/0.10.1/quicklinks/quicklinks.json     |  37 +++
 .../DRUID/0.10.1/role_command_order.json        |  17 +
 .../DRUID/0.10.1/themes/theme.json              | 120 ++++++++
 .../DRUID/0.9.2/configuration/druid-broker.xml  | 106 -------
 .../DRUID/0.9.2/configuration/druid-common.xml  | 270 ----------------
 .../0.9.2/configuration/druid-coordinator.xml   |  43 ---
 .../DRUID/0.9.2/configuration/druid-env.xml     | 248 ---------------
 .../0.9.2/configuration/druid-historical.xml    |  94 ------
 .../DRUID/0.9.2/configuration/druid-log4j.xml   |  84 -----
 .../0.9.2/configuration/druid-logrotate.xml     |  68 ----
 .../0.9.2/configuration/druid-middlemanager.xml | 122 --------
 .../0.9.2/configuration/druid-overlord.xml      |  52 ----
 .../DRUID/0.9.2/configuration/druid-router.xml  |  59 ----
 .../common-services/DRUID/0.9.2/metainfo.xml    | 223 --------------
 .../DRUID/0.9.2/package/scripts/broker.py       |  28 --
 .../DRUID/0.9.2/package/scripts/coordinator.py  |  28 --
 .../DRUID/0.9.2/package/scripts/druid.py        | 307 -------------------
 .../DRUID/0.9.2/package/scripts/druid_node.py   | 114 -------
 .../DRUID/0.9.2/package/scripts/historical.py   |  28 --
 .../0.9.2/package/scripts/middlemanager.py      |  28 --
 .../DRUID/0.9.2/package/scripts/overlord.py     |  28 --
 .../DRUID/0.9.2/package/scripts/params.py       | 200 ------------
 .../DRUID/0.9.2/package/scripts/router.py       |  28 --
 .../0.9.2/package/scripts/service_check.py      |  44 ---
 .../0.9.2/package/scripts/status_params.py      |  24 --
 .../DRUID/0.9.2/quicklinks/quicklinks.json      |  37 ---
 .../DRUID/0.9.2/role_command_order.json         |  17 -
 .../DRUID/0.9.2/themes/theme.json               | 120 --------
 .../stacks/HDP/2.6/services/DRUID/metainfo.xml  |   5 +-
 .../test/python/stacks/2.6/DRUID/test_druid.py  |   2 +-
 52 files changed, 2403 insertions(+), 2404 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-broker.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-broker.xml
new file mode 100644
index 0000000..6146ca3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-broker.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/broker</value>
+    <description>The druid.service name of broker node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8082</value>
+    <description>The port on which the broker will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.http.numConnections</name>
+    <value>20</value>
+    <description>Size of connection pool for the Broker to connect to historical and real-time nodes. If there are more
+      queries than this number that all need to speak to the same node, then they will queue up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>1073741824</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
+      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
+      computations
+      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
+      require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>2</value>
+    <description>The number of processing threads to have available for parallel processing of segments.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numMergeBuffers</name>
+    <value>2</value>
+    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.cache.useCache</name>
+    <value>true</value>
+    <description>Enable the cache on the broker.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.cache.populateCache</name>
+    <value>true</value>
+    <description>Populate the cache on the broker.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.cache.type</name>
+    <value>local</value>
+    <description>The type of cache to use for queries.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.cache.sizeInBytes</name>
+    <value>10000000</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>Maximum cache size in bytes. Zero disables caching.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
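
A sizing note on the broker defaults above: Druid's general guidance is that
direct memory must cover druid.processing.buffer.sizeBytes once per processing
thread, once per merge buffer, plus one spare buffer. A back-of-the-envelope
check (illustrative only; nothing in this commit computes this):

    # Defaults taken from druid-broker.xml above.
    size_bytes = 1073741824        # druid.processing.buffer.sizeBytes (1 GiB)
    num_threads = 2                # druid.processing.numThreads
    num_merge_buffers = 2          # druid.processing.numMergeBuffers

    min_direct_bytes = size_bytes * (num_threads + num_merge_buffers + 1)
    print("%d MB of direct memory needed" % (min_direct_bytes // (1024 ** 2)))  # 5120 MB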

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-common.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-common.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-common.xml
new file mode 100644
index 0000000..d3b53cd
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-common.xml
@@ -0,0 +1,270 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.extensions.pullList</name>
+    <value>[]</value>
+    <description>A comma-separated list of one or more druid extensions to download from maven.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.extensions.repositoryList</name>
+    <value>[]</value>
+    <description>A comma-separated list of maven repositories from which to download extensions.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.extensions.loadList</name>
+    <value>["druid-datasketches"]
+    </value>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <description>A comma-separated list of one or more druid extensions to load.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.security.extensions.loadList</name>
+    <value>[]</value>
+    <description>A comma-separated list of one or more druid security extensions to load. This property is set via the Kerberos wizard, and users are not allowed to modify it when security is enabled.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.zk.service.host</name>
+    <value>localhost:2181</value>
+    <description>
+      ZooKeeper connection string.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.zk.paths.base</name>
+    <value>/druid</value>
+    <description>
+      Base Zookeeper path
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.discovery.curator.path</name>
+    <value>/druid/discovery</value>
+    <description>
+      Services announce themselves under this ZooKeeper path.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.storage.type</name>
+    <value></value>
+    <description>
+      Choices: local, noop, s3, hdfs, c*. The type of deep storage to use.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.storage.storageDirectory</name>
+    <value></value>
+    <description>
+      Directory to use as deep storage.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.password</name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Metadata storage password</display-name>
+    <description>Password for the metadata storage database.</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.user</name>
+    <value>druid</value>
+    <display-name>Metadata storage user</display-name>
+    <description>Metadata storage user</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.port</name>
+    <value>1527</value>
+    <display-name>Metadata storage port</display-name>
+    <description>Metadata storage port</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>database_name</name>
+    <value>druid</value>
+    <display-name>Druid Metadata storage database name</display-name>
+    <description>Druid Metadata storage database name</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metastore_hostname</name>
+    <value>localhost</value>
+    <display-name>Metadata storage hostname</display-name>
+    <description>Metadata storage hostname</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>druid.metadata.storage.type</name>
+    <display-name>Druid Metadata storage type</display-name>
+    <value>derby</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mysql</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>derby</value>
+          <label>DERBY</label>
+        </entry>
+        <entry>
+          <value>postgresql</value>
+          <label>POSTGRESQL</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <description>Type of the metadata storage. Note that derby will work only if all the druid nodes are located
+      on the same host. Use mysql or postgresql for distributed mode.
+      mysql installed by ambari is for development only and is not suitable for production use because it is not HA.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>druid.metadata.storage.connector.connectURI</name>
+    <value>jdbc:derby://localhost:1527/druid;create=true</value>
+    <display-name>Metadata storage connector url</display-name>
+    <description>Metadata storage connector url</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>database_name</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>metastore_hostname</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.port</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>druid.hadoop.security.kerberos.principal</name>
+    <display-name>kerberos principal</display-name>
+    <description>Kerberos principal, e.g. druid@EXAMPLE.COM</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.hadoop.security.kerberos.keytab</name>
+    <display-name>Kerberos keytab location</display-name>
+    <description>Kerberos keytab location</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>druid.emitter</name>
+    <value>{{metric_emitter_type}}</value>
+    <description>Emitter used to emit metrics. Each of the values "noop", "logging", "ambari-metrics", or "http"
+      initializes the corresponding emitter module.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.hostname</name>
+    <value>{{metric_collector_host}}</value>
+    <description>Timeline host</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.port</name>
+    <value>{{metric_collector_port}}</value>
+    <description>Timeline port</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.protocol</name>
+    <value>{{metric_collector_protocol}}</value>
+    <description>Timeline protocol (http or https)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.trustStorePath</name>
+    <value>{{metric_truststore_path}}</value>
+    <description>Location of the trust store file.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.trustStoreType</name>
+    <value>{{metric_truststore_type}}</value>
+    <description>Optional. Default value is "jks".</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.trustStorePassword</name>
+    <value>{{metric_truststore_password}}</value>
+    <description>Password to open the trust store file.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.emitter.ambari-metrics.eventConverter</name>
+    <value>{"type":"whiteList"}</value>
+    <description>Event converter used to transform Druid metric events before they are sent to the Ambari Metrics collector; the whiteList converter emits only whitelisted metrics.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.monitoring.monitors</name>
+    <value>["com.metamx.metrics.JvmMonitor"]</value>
+    <description>A JSON list of Druid monitor classes that periodically emit metrics, e.g. the JVM monitor configured above.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
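
The depends-on block above shows that the connect URI is derived from the
storage type, hostname, port and database name. A hedged sketch of that
derivation (build_connect_uri is a hypothetical helper, not Ambari's actual
params.py code; the mysql/postgresql URL shapes are standard JDBC forms):

    def build_connect_uri(storage_type, host, port, database):
        # The derby branch mirrors the default value shown above.
        if storage_type == 'derby':
            return 'jdbc:derby://%s:%s/%s;create=true' % (host, port, database)
        if storage_type == 'mysql':
            return 'jdbc:mysql://%s:%s/%s?createDatabaseIfNotExist=true' % (host, port, database)
        return 'jdbc:postgresql://%s:%s/%s' % (host, port, database)

    print(build_connect_uri('derby', 'localhost', 1527, 'druid'))
    # jdbc:derby://localhost:1527/druid;create=true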

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-coordinator.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-coordinator.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-coordinator.xml
new file mode 100644
index 0000000..618f11d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-coordinator.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/coordinator</value>
+    <description>The druid.service name of coordinator node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8081</value>
+    <description>The port on which the coordinator will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.merge.on</name>
+    <value>false</value>
+    <description>Boolean flag for whether or not the coordinator should try to merge small segments into a more optimal
+      segment size.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-env.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-env.xml
new file mode 100644
index 0000000..2e96f6a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-env.xml
@@ -0,0 +1,248 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!--Heap Settings -->
+  <property>
+    <name>druid.broker.jvm.heap.memory</name>
+    <value>2048</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.heap.memory</name>
+    <value>256</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.heap.memory</name>
+    <value>2048</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- DirectMemorySettings -->
+  <property>
+    <name>druid.broker.jvm.direct.memory</name>
+    <value>1048576</value>
+    <depends-on>
+      <property>
+        <type>druid-broker</type>
+        <name>druid.processing.buffer.sizeBytes</name>
+      </property>
+      <property>
+        <type>druid-broker</type>
+        <name>druid.processing.numThreads</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.direct.memory</name>
+    <value>1048576</value>
+    <depends-on>
+      <property>
+        <type>druid-historical</type>
+        <name>druid.processing.buffer.sizeBytes</name>
+      </property>
+      <property>
+        <type>druid-historical</type>
+        <name>druid.processing.numThreads</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- JavaOpts Tune GC related configs here-->
+  <property>
+    <name>druid.broker.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_user</name>
+    <display-name>Druid User</display-name>
+    <value>druid</value>
+    <property-type>USER</property-type>
+    <description>The OS user account that the Druid services run as.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+      <user-groups>
+        <property>
+          <type>cluster-env</type>
+          <name>user_group</name>
+        </property>
+      </user-groups>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_log_dir</name>
+    <value>/var/log/druid</value>
+    <description>Directory where the Druid daemons write their log files.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_pid_dir</name>
+    <value>/var/run/druid</value>
+    <display-name>Druid PID dir</display-name>
+    <description>Directory where the Druid daemons write their PID files.</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- druid-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>druid-env template</display-name>
+    <description>This is a simple template for the druid-env.sh file</description>
+    <value>
+      #!/bin/bash
+
+      # Set DRUID specific environment variables here.
+
+      # The java implementation to use.
+      export JAVA_HOME={{java8_home}}
+      export PATH=$JAVA_HOME/bin:$PATH
+      export DRUID_PID_DIR={{druid_pid_dir}}
+      export DRUID_LOG_DIR={{druid_log_dir}}
+      export DRUID_CONF_DIR={{druid_conf_dir}}
+      export DRUID_LIB_DIR={{druid_home}}/lib
+      export HADOOP_CONF_DIR={{hadoop_conf_dir}}
+
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
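
The content property above is a template: the {{...}} placeholders are filled
in at deploy time. A minimal stand-alone illustration with the jinja2 package
(the substituted values below are made up for the example):

    from jinja2 import Template

    env_template = (
        "export JAVA_HOME={{java8_home}}\n"
        "export DRUID_PID_DIR={{druid_pid_dir}}\n"
        "export DRUID_LOG_DIR={{druid_log_dir}}\n"
    )

    print(Template(env_template).render(
        java8_home='/usr/jdk64/jdk1.8.0_112',
        druid_pid_dir='/var/run/druid',
        druid_log_dir='/var/log/druid',
    ))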

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-historical.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-historical.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-historical.xml
new file mode 100644
index 0000000..5ff30ce
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-historical.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/historical</value>
+    <description>The druid.service name of historical node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8083</value>
+    <description>The port on which the historical nodes will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numMergeBuffers</name>
+    <value>2</value>
+    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>1073741824</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
+      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
+      computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller
+      values can require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>10</value>
+    <description>The number of processing threads to have available for parallel processing of segments.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.segmentCache.locations</name>
+    <value>[{"path":"/apps/druid/segmentCache","maxSize":300000000000}]</value>
+    <description>Segments assigned to a Historical node are first stored on the local file system (in a disk cache) and
+      then served by the Historical node. These locations define where that local cache resides.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.maxSize</name>
+    <value>300000000000</value>
+    <description>The maximum number of bytes-worth of segments that the node wants assigned to it. This is not a limit
+      that Historical nodes actually enforce, just a value published to the Coordinator node so it can plan
+      accordingly.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.segmentCache.infoDir</name>
+    <value>/apps/druid/segmentCache/info_dir</value>
+    <description>Historical nodes keep track of the segments they are serving so that when the process is restarted they
+      can reload the same segments without waiting for the Coordinator to reassign. This path defines where this
+      metadata is kept. Directory will be created if needed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
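
druid.segmentCache.locations above is a JSON list, and druid.server.maxSize
should stay within the total capacity those locations declare. An illustrative
consistency check (not something this commit performs):

    import json

    # Defaults from druid-historical.xml above.
    locations = json.loads('[{"path":"/apps/druid/segmentCache","maxSize":300000000000}]')
    server_max_size = 300000000000  # druid.server.maxSize

    total_cache = sum(loc['maxSize'] for loc in locations)
    assert server_max_size <= total_cache  # holds for the defaults: 300 GB == 300 GB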

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-log4j.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-log4j.xml
new file mode 100644
index 0000000..bcb731a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-log4j.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>druid_log_level</name>
+    <value>info</value>
+    <description>Log level for io.druid logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>root_log_level</name>
+    <value>WARN</value>
+    <description>Log level for root logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metamx_log_level</name>
+    <value>info</value>
+    <description>Log level for com.metamx logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>druid-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value><![CDATA[<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+    <Configuration>
+        <Appenders>
+            <Console name="Console" target="SYSTEM_OUT">
+                <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+            </Console>
+        </Appenders>
+        <Loggers>
+            <Logger name="com.metamx" level="{{metamx_log_level}}"/>
+            <Logger name="io.druid" level="{{druid_log_level}}"/>
+            <Root level="{{root_log_level}}">
+                <AppenderRef ref="Console"/>
+            </Root>
+        </Loggers>
+    </Configuration>
+      ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-logrotate.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-logrotate.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-logrotate.xml
new file mode 100644
index 0000000..b7308ce
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-logrotate.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>druid_log_maxfilesize</name>
+    <value>256</value>
+    <description>The maximum size a log file may reach before it is rotated</description>
+    <display-name>Druid Log: backup file size</display-name>
+    <value-attributes>
+      <unit>MB</unit>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_log_maxbackupindex</name>
+    <value>7</value>
+    <description>The number of backup files</description>
+    <display-name>Druid Log: # of backup files</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>druid logrotate template</display-name>
+    <description>Custom logrotate file</description>
+    <value><![CDATA[
+    {{druid_log_dir}}/*.log {
+        copytruncate
+        rotate {{druid_log_maxbackupindex}}
+        daily
+        nocompress
+        missingok
+        notifempty
+        create 660 druid users
+        dateext
+        dateformat -%Y-%m-%d-%s
+        size {{druid_log_maxfilesize}}M
+        }
+      ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
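
Once rendered, the template above is an ordinary logrotate stanza; a dry run
with logrotate -d against the rendered file verifies it without rotating
anything. The two tunables also bound worst-case disk usage per log file, as
this quick calculation shows (illustrative only):

    # Defaults from druid-logrotate.xml above.
    max_backup_index = 7   # druid_log_maxbackupindex
    max_file_mb = 256      # druid_log_maxfilesize

    worst_case_mb = (max_backup_index + 1) * max_file_mb  # backups + active log
    print("%d MB worst case per log file" % worst_case_mb)  # 2048 MB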

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-middlemanager.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-middlemanager.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-middlemanager.xml
new file mode 100644
index 0000000..08280ad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-middlemanager.xml
@@ -0,0 +1,122 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/middlemanager</value>
+    <description>The druid.service name of middlemanager node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8091</value>
+    <description>The port on which the middlemanager nodes will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.startPort</name>
+    <value>8100</value>
+    <description>The port that peons begin running on.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.worker.capacity</name>
+    <value>3</value>
+    <description>Maximum number of tasks the middlemanager can accept and run concurrently.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.javaOpts</name>
+    <value>-server -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dhdp.version={{stack_version}} -Dhadoop.mapreduce.job.classloader=true</value>
+    <description>
+      A string of -X Java options to pass to the peon's JVM.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.task.baseTaskDir</name>
+    <value>/apps/druid/tasks</value>
+    <description>
+      Base temporary working directory for druid tasks.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>
+      Number of threads for HTTP requests.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>256000000</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>
+      This specifies a buffer size for the storage of intermediate results. The computation engine in both the
+      Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations
+      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
+      require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>2</value>
+    <description>
+      The number of processing threads to have available for parallel processing of segments.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.task.hadoopWorkingPath</name>
+    <value>/tmp/druid-indexing</value>
+    <description>
+      Temporary working directory for Hadoop tasks
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.fork.property.hadoop.mapreduce.reduce.java.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <description>
+      Default java properties for Reducer containers
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>druid.indexer.fork.property.hadoop.mapreduce.map.java.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <description>
+      Default java properties for Map containers
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
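
Taken together, druid.indexer.runner.startPort and druid.worker.capacity above
roughly determine the ports peons occupy on a middlemanager host. A rough
sketch (illustrative; actual peon port assignment is handled by Druid at
runtime):

    # Defaults from druid-middlemanager.xml above.
    start_port = 8100  # druid.indexer.runner.startPort
    capacity = 3       # druid.worker.capacity

    peon_ports = list(range(start_port, start_port + capacity))
    print(peon_ports)  # [8100, 8101, 8102]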

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-overlord.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-overlord.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-overlord.xml
new file mode 100644
index 0000000..57d1c63
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-overlord.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/overlord</value>
+    <description>The druid.service name of overlord node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8090</value>
+    <description>The port on which the overlord will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.type</name>
+    <value>remote</value>
+    <description>Choices "local" or "remote". Indicates whether tasks should be run locally or in a distributed
+      environment.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.storage.type</name>
+    <value>metadata</value>
+    <description>Choices are "local" or "metadata". Indicates whether incoming tasks should be stored locally (in heap)
+      or in metadata storage. Storing incoming tasks in metadata storage allows for tasks to be resumed if the overlord
+      should fail.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-router.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-router.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-router.xml
new file mode 100644
index 0000000..d544315
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/configuration/druid-router.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/router</value>
+    <description>The druid.service name of the router node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8888</value>
+    <description>The port on which the router will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.http.numConnections</name>
+    <value>20</value>
+    <description>
+      Size of connection pool for the router to connect to historical and real-time nodes. If there are more
+      queries than this number that all need to speak to the same node, then they will queue up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.tierToBrokerMap</name>
+    <value>{"_default_tier":"druid/broker"}</value>
+    <description>
+      Used to route queries for a certain tier of data to their appropriate broker. An ordered JSON map of
+      tiers to broker names. The priority of brokers is based on the ordering.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
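
The druid.router.tierToBrokerMap value above is plain JSON. A small sketch of the tier-to-broker lookup the router performs (simplified; the fallback to the default tier is an assumption for illustration):

    import json

    # Value copied from druid.router.tierToBrokerMap above.
    tier_to_broker = json.loads('{"_default_tier":"druid/broker"}')

    def broker_for_tier(tier):
        # Unmapped tiers fall back to the default tier's broker here.
        return tier_to_broker.get(tier, tier_to_broker["_default_tier"])

    print(broker_for_tier("hot"))  # -> druid/broker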

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/metainfo.xml b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/metainfo.xml
new file mode 100644
index 0000000..53dee2b
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/metainfo.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>DRUID</name>
+      <displayName>Druid</displayName>
+      <comment>A fast column-oriented distributed data store.</comment>
+      <version>0.10.1</version>
+      <components>
+        <component>
+          <name>DRUID_COORDINATOR</name>
+          <displayName>Druid Coordinator</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/coordinator.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-coordinator</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_OVERLORD</name>
+          <displayName>Druid Overlord</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/overlord.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-overlord</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_HISTORICAL</name>
+          <displayName>Druid Historical</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historical.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-historical</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_BROKER</name>
+          <displayName>Druid Broker</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/broker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-broker</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_MIDDLEMANAGER</name>
+          <displayName>Druid MiddleManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/middlemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-middlemanager</config-type>
+          </configuration-dependencies>
+        </component>
+        <component>
+          <name>DRUID_ROUTER</name>
+          <displayName>Druid Router</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/router.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>druid-router</config-type>
+          </configuration-dependencies>
+        </component>
+      </components>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>druid_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>druid-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+      <configuration-dependencies>
+        <config-type>druid-common</config-type>
+        <config-type>druid-env</config-type>
+        <config-type>druid-log4j</config-type>
+        <config-type>druid-logrotate</config-type>
+        <config-type>zoo.cfg</config-type>
+      </configuration-dependencies>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+    </service>
+  </services>
+</metainfo>
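
Because metainfo.xml is plain XML, the component layout declared above can be inspected with a few lines of Python (a standalone sketch; assumes the file is saved locally as metainfo.xml):

    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")
    for component in tree.findall(".//component"):
        # Print each Druid component with its category and cardinality.
        print("%s: category=%s, cardinality=%s" % (
            component.findtext("name"),
            component.findtext("category"),
            component.findtext("cardinality")))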

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/broker.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/broker.py
new file mode 100644
index 0000000..bd170cb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/broker.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidBroker(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="broker")
+
+
+if __name__ == "__main__":
+  DruidBroker().execute()
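
Each of these thin entry-point scripts relies on Script.execute() to dispatch the agent's command (INSTALL, START, STOP, STATUS, ...) to a method of the same name. Conceptually the dispatch looks like the following simplified sketch (not the real resource_management code):

    class MiniScript(object):
        # Hypothetical stand-in for resource_management's Script dispatcher.
        def execute_command(self, command_name):
            method = getattr(self, command_name.lower(), None)
            if method is None:
                raise NotImplementedError(command_name)
            return method()

        def start(self):
            return "started"

    print(MiniScript().execute_command("START"))  # -> started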

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py
new file mode 100644
index 0000000..a86fa40
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/coordinator.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidCoordinator(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="coordinator")
+
+
+if __name__ == "__main__":
+  DruidCoordinator().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid.py
new file mode 100644
index 0000000..ec98c3c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid.py
@@ -0,0 +1,307 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import json
+import os
+from resource_management import Fail
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.core.resources.system import Directory, Execute, File
+from resource_management.core.source import DownloadSource
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.logger import Logger
+
+
+def druid(upgrade_type=None, nodeType=None):
+  import params
+  ensure_base_directories()
+
+  # Environment Variables
+  File(format("{params.druid_conf_dir}/druid-env.sh"),
+       owner=params.druid_user,
+       content=InlineTemplate(params.druid_env_sh_template),
+       mode = 0700
+       )
+
+  # common config
+  druid_common_config = mutable_config_dict(params.config['configurations']['druid-common'])
+  # User cannot override below configs
+  druid_common_config['druid.host'] = params.hostname
+  druid_common_config['druid.extensions.directory'] = params.druid_extensions_dir
+  druid_common_config['druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
+  druid_common_config['druid.selectors.indexing.serviceName'] = params.config['configurations']['druid-overlord'][
+    'druid.service']
+  druid_common_config['druid.selectors.coordinator.serviceName'] = \
+    params.config['configurations']['druid-coordinator']['druid.service']
+  druid_common_config['druid.extensions.loadList'] = json.dumps(eval(params.druid_extensions_load_list) +
+                                                     eval(params.druid_security_extensions_load_list))
+
+  # delete the user and password entries when using derby, otherwise derby will fail.
+  if 'derby' == druid_common_config['druid.metadata.storage.type']:
+    del druid_common_config['druid.metadata.storage.connector.user']
+    del druid_common_config['druid.metadata.storage.connector.password']
+
+  druid_env_config = mutable_config_dict(params.config['configurations']['druid-env'])
+
+  PropertiesFile("common.runtime.properties",
+                 dir=params.druid_common_conf_dir,
+                 properties=druid_common_config,
+                 owner=params.druid_user,
+                 group=params.user_group,
+                 mode = 0600
+                 )
+  Logger.info("Created common.runtime.properties")
+
+  File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
+       mode=0644,
+       owner=params.druid_user,
+       group=params.user_group,
+       content=InlineTemplate(params.log4j_props)
+       )
+  Logger.info("Created log4j file")
+
+  File("/etc/logrotate.d/druid",
+       mode=0644,
+       owner='root',
+       group='root',
+       content=InlineTemplate(params.logrotate_props)
+       )
+
+  Logger.info("Created log rotate file")
+
+  # node specific configs
+  for node_type in ['coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router']:
+    node_config_dir = format('{params.druid_conf_dir}/{node_type}')
+    node_type_lowercase = node_type.lower()
+
+    # Write runtime.properties file
+    node_config = mutable_config_dict(params.config['configurations'][format('druid-{node_type_lowercase}')])
+    PropertiesFile("runtime.properties",
+                   dir=node_config_dir,
+                   properties=node_config,
+                   owner=params.druid_user,
+                   group=params.user_group,
+                   mode = 0600
+                   )
+    Logger.info(format("Created druid-{node_type_lowercase} runtime.properties"))
+
+    # Write jvm configs
+    File(format('{node_config_dir}/jvm.config'),
+         owner=params.druid_user,
+         group=params.user_group,
+         content=InlineTemplate(
+           "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+           node_heap_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.heap.memory')],
+           log4j_config_file=format("{params.druid_common_conf_dir}/druid-log4j.xml"),
+           node_direct_memory=druid_env_config[
+             format('druid.{node_type_lowercase}.jvm.direct.memory')],
+           node_jvm_opts=druid_env_config[format('druid.{node_type_lowercase}.jvm.opts')])
+         )
+    Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))
+    # Handle hadoop LZO jars when LZO is enabled and the node type is hadoop-related, e.g. Overlords and MiddleManagers
+    if node_type_lowercase in ['middlemanager', 'overlord'] and params.lzo_enabled and len(
+            params.lzo_packages) > 0:
+        try:
+            Logger.info(
+                format(
+                    "Copying hadoop lzo jars from {hadoop_lib_home} to {druid_hadoop_dependencies_dir}/hadoop-client/*/"))
+            Execute(
+                format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {druid_hadoop_dependencies_dir}/hadoop-client/*/'))
+        except Fail as ex:
+            Logger.info(format("No Hadoop LZO found at {hadoop_lib_home}/hadoop-lzo*.jar"))
+
+  # All druid nodes have dependency on hdfs_client
+  ensure_hadoop_directories()
+  download_database_connector_if_needed()
+  # Pull all required dependencies
+  pulldeps()
+
+
+def mutable_config_dict(config):
+  rv = {}
+  for key, value in config.iteritems():
+    rv[key] = value
+  return rv
+
+
+def ensure_hadoop_directories():
+  import params
+  if 'hdfs-site' not in params.config['configurations']:
+    # HDFS not installed, nothing to do.
+    Logger.info("Skipping HDFS directory creation as HDFS not installed")
+    return
+
+  druid_common_config = params.config['configurations']['druid-common']
+  # the final middlemanager config contains both common and middlemanager config
+  druid_middlemanager_config = params.config['configurations']['druid-middlemanager']
+
+  # If user is using HDFS as deep storage create HDFS Directory for storing segments
+  deep_storage = druid_common_config["druid.storage.type"]
+  storage_dir = druid_common_config["druid.storage.storageDirectory"]
+
+  if deep_storage == 'hdfs':
+    # create the home dir for druid
+    params.HdfsResource(format("/user/{params.druid_user}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.druid_user,
+                        group='hadoop',
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+
+    # create the segment storage dir, users like hive from group hadoop need to write to this directory
+    create_hadoop_directory(storage_dir, mode=0775)
+
+  # Create HadoopIndexTask hadoopWorkingPath
+  hadoop_working_path = druid_middlemanager_config['druid.indexer.task.hadoopWorkingPath']
+  if hadoop_working_path is not None:
+    if hadoop_working_path.startswith(params.hdfs_tmp_dir):
+        params.HdfsResource(params.hdfs_tmp_dir,
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.hdfs_user,
+                            mode=0777,
+                            )
+    create_hadoop_directory(hadoop_working_path, mode=0775)
+
+  # If HDFS is used for storing logs, create Index Task log directory
+  indexer_logs_type = druid_common_config['druid.indexer.logs.type']
+  indexer_logs_directory = druid_common_config['druid.indexer.logs.directory']
+  if indexer_logs_type == 'hdfs' and indexer_logs_directory is not None:
+    create_hadoop_directory(indexer_logs_directory)
+
+
+def create_hadoop_directory(hadoop_dir, mode=0755):
+  import params
+  params.HdfsResource(hadoop_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.druid_user,
+                      group='hadoop',
+                      mode=mode
+                      )
+  Logger.info(format("Created Hadoop Directory [{hadoop_dir}], with mode [{mode}]"))
+
+
+def ensure_base_directories():
+  import params
+  Directory(
+    [params.druid_log_dir, params.druid_pid_dir],
+    mode=0755,
+    owner=params.druid_user,
+    group=params.user_group,
+    create_parents=True,
+    recursive_ownership=True,
+  )
+
+  Directory(
+    [params.druid_conf_dir, params.druid_common_conf_dir, params.druid_coordinator_conf_dir,
+     params.druid_broker_conf_dir, params.druid_middlemanager_conf_dir, params.druid_historical_conf_dir,
+     params.druid_overlord_conf_dir, params.druid_router_conf_dir, params.druid_segment_infoDir,
+     params.druid_tasks_dir],
+    mode=0700,
+    cd_access='a',
+    owner=params.druid_user,
+    group=params.user_group,
+    create_parents=True,
+    recursive_ownership=True,
+  )
+
+  segment_cache_locations = json.loads(params.druid_segment_cache_locations)
+  for segment_cache_location in segment_cache_locations:
+    Directory(
+      segment_cache_location["path"],
+      mode=0700,
+      owner=params.druid_user,
+      group=params.user_group,
+      create_parents=True,
+      recursive_ownership=True,
+      cd_access='a'
+    )
+
+
+
+def get_daemon_cmd(params=None, node_type=None, command=None):
+  return format('source {params.druid_conf_dir}/druid-env.sh ; {params.druid_home}/bin/node.sh {node_type} {command}')
+
+
+def getPid(params=None, nodeType=None):
+  return format('{params.druid_pid_dir}/{nodeType}.pid')
+
+
+def pulldeps():
+  import params
+  extensions_list = eval(params.druid_extensions)
+  extensions_string = '{0}'.format(" -c ".join(extensions_list))
+  repository_list = eval(params.druid_repo_list)
+  repository_string = '{0}'.format(" -r ".join(repository_list))
+  if len(extensions_list) > 0:
+    try:
+      # Make sure druid user has permissions to write dependencies
+      Directory(
+        [params.druid_extensions_dir, params.druid_hadoop_dependencies_dir],
+        mode=0755,
+        cd_access='a',
+        owner=params.druid_user,
+        group=params.user_group,
+        create_parents=True,
+        recursive_ownership=True,
+      )
+      pull_deps_command = format(
+        "source {params.druid_conf_dir}/druid-env.sh ; java -classpath '{params.druid_home}/lib/*' -Ddruid.extensions.loadList=[] "
+        "-Ddruid.extensions.directory={params.druid_extensions_dir} -Ddruid.extensions.hadoopDependenciesDir={params.druid_hadoop_dependencies_dir} "
+        "io.druid.cli.Main tools pull-deps -c {extensions_string} --no-default-hadoop")
+
+      if len(repository_list) > 0:
+        pull_deps_command = format("{pull_deps_command} -r {repository_string}")
+
+      Execute(pull_deps_command,
+              user=params.druid_user
+              )
+      Logger.info(format("Pull Dependencies Complete"))
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
+
+
+def download_database_connector_if_needed():
+  """
+  Downloads the database connector to use when connecting to the metadata storage
+  """
+  import params
+  if params.metadata_storage_type != 'mysql' or not params.jdbc_driver_jar:
+    return
+
+  File(params.check_db_connection_jar,
+       content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}"))
+       )
+
+  target_jar_with_directory = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
+
+  if not os.path.exists(target_jar_with_directory):
+    File(params.downloaded_custom_connector,
+         content=DownloadSource(params.connector_curl_source))
+
+    Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target_jar_with_directory),
+            path=["/bin", "/usr/bin/"],
+            sudo=True)
+
+    File(target_jar_with_directory, owner=params.druid_user,
+         group=params.user_group)
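
The InlineTemplate used for jvm.config above renders to a handful of JVM flags per node type. Roughly, with example sizes (the real values come from druid-env, and the log4j path below is only illustrative):

    # Illustration only: approximate rendered jvm.config for one node type.
    template = ("-server \n-Xms%(heap)sm \n-Xmx%(heap)sm \n"
                "-XX:MaxDirectMemorySize=%(direct)sm \n"
                "-Dlog4j.configurationFile=%(log4j)s \n-Dlog4j.debug \n%(opts)s")
    print(template % {
        "heap": 2048,
        "direct": 1024,
        "log4j": "/etc/druid/conf/_common/druid-log4j.xml",
        "opts": "-Duser.timezone=UTC",
    })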

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid_node.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid_node.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid_node.py
new file mode 100644
index 0000000..8053dcb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/druid_node.py
@@ -0,0 +1,114 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+from resource_management.core import sudo
+from resource_management import Script
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.show_logs import show_logs
+from druid import druid, get_daemon_cmd, getPid
+
+
+class DruidBase(Script):
+  def __init__(self, nodeType=None):
+    self.nodeType = nodeType
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    druid(upgrade_type=upgrade_type, nodeType=self.nodeType)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    node_type_lower = self.nodeType.lower()
+    Logger.info(format("Executing druid-{node_type_lower} Upgrade pre-restart"))
+    import params
+
+    env.set_params(params)
+
+    if params.stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version):
+      stack_select.select_packages(params.stack_version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env, upgrade_type=upgrade_type)
+    daemon_cmd = get_daemon_cmd(params, self.nodeType, "start")
+    # Verify Database connection on Druid start
+    if params.metadata_storage_type == 'mysql':
+      if not params.jdbc_driver_jar or not os.path.isfile(params.connector_download_dir + os.path.sep + params.jdbc_driver_jar):
+        path_to_jdbc =  params.connector_download_dir + os.path.sep + "*"
+        error_message = format("Error! Sorry, but we can't find the jdbc driver for mysql, so the db connection check can fail. "
+                               "Please run 'ambari-server setup --jdbc-db=mysql --jdbc-driver={path_to_jdbc}' on the Ambari server host.")
+        Logger.error(error_message)
+      else:
+        path_to_jdbc = params.connector_download_dir + os.path.sep + params.jdbc_driver_jar
+      db_connection_check_command = format("{params.java8_home}/bin/java -cp {params.check_db_connection_jar}:{path_to_jdbc} org.apache.ambari.server.DBConnectionVerification '{params.metadata_storage_url}' {params.metadata_storage_user} {params.metadata_storage_password!p} com.mysql.jdbc.Driver")
+    else:
+      db_connection_check_command = None
+
+    if db_connection_check_command:
+      sudo.chmod(params.check_db_connection_jar, 0755)
+      Execute( db_connection_check_command,
+               tries=5,
+               try_sleep=10,
+               user=params.druid_user
+               )
+
+    try:
+      Execute(daemon_cmd,
+              user=params.druid_user
+              )
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    daemon_cmd = get_daemon_cmd(params, self.nodeType, "stop")
+    try:
+      Execute(daemon_cmd,
+              user=params.druid_user
+              )
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = getPid(status_params, self.nodeType)
+    check_process_status(pid_file)
+
+  def get_log_folder(self):
+    import params
+    return params.druid_log_dir
+
+  def get_user(self):
+    import params
+    return params.druid_user
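
The status() implementation above relies on a simple pid-file convention via getPid(). A standalone sketch of that contract:

    import os

    def pid_file(druid_pid_dir, node_type):
        # Each daemon writes <druid_pid_dir>/<node_type>.pid, which
        # check_process_status() then inspects.
        return os.path.join(druid_pid_dir, "%s.pid" % node_type)

    print(pid_file("/var/run/druid", "overlord"))  # /var/run/druid/overlord.pid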

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/historical.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/historical.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/historical.py
new file mode 100644
index 0000000..22390a6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/historical.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidHistorical(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="historical")
+
+
+if __name__ == "__main__":
+  DruidHistorical().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/middlemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/middlemanager.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/middlemanager.py
new file mode 100644
index 0000000..20df89c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/middlemanager.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidMiddleManager(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="middleManager")
+
+
+if __name__ == "__main__":
+  DruidMiddleManager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/overlord.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/overlord.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/overlord.py
new file mode 100644
index 0000000..e4d7fcc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/overlord.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidOverlord(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="overlord")
+
+
+if __name__ == "__main__":
+  DruidOverlord().execute()


[02/50] [abbrv] ambari git commit: AMBARI-22102 Ranger KMS should add proxy user for Spark2 user (mugdha)

Posted by jl...@apache.org.
AMBARI-22102 Ranger KMS should add proxy user for Spark2 user (mugdha)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/03696f16
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/03696f16
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/03696f16

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 03696f16607dc84723b608a66c476ea0706bf525
Parents: 969ecfc
Author: Mugdha Varadkar <mu...@apache.org>
Authored: Tue Oct 3 11:08:34 2017 +0530
Committer: Mugdha Varadkar <mu...@apache.org>
Committed: Tue Oct 3 11:29:00 2017 +0530

----------------------------------------------------------------------
 .../common-services/RANGER_KMS/1.0.0.3.0/service_advisor.py       | 3 ++-
 .../src/main/resources/stacks/HDP/2.5/services/stack_advisor.py   | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/03696f16/ambari-server/src/main/resources/common-services/RANGER_KMS/1.0.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/1.0.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/1.0.0.3.0/service_advisor.py
index b81e05b..33f39b2 100644
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/1.0.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/1.0.0.3.0/service_advisor.py
@@ -232,7 +232,8 @@ class RangerKMSRecommender(service_advisor.ServiceAdvisor):
   def recommendRangerKMSConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
 
     security_enabled = self.isSecurityEnabled(services)
-    required_services = [{'service' : 'RANGER', 'config-type': 'ranger-env', 'property-name': 'ranger_user', 'proxy-category': ['hosts', 'users', 'groups']}]
+    required_services = [{'service' : 'RANGER', 'config-type': 'ranger-env', 'property-name': 'ranger_user', 'proxy-category': ['hosts', 'users', 'groups']},
+                        {'service' : 'SPARK2', 'config-type': 'livy2-env', 'property-name': 'livy2_user', 'proxy-category': ['hosts', 'users', 'groups']}]
 
     if security_enabled:
       # recommendations for kms proxy related properties

http://git-wip-us.apache.org/repos/asf/ambari/blob/03696f16/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
index 2a5d76b..92ce9b9 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
@@ -1969,7 +1969,8 @@ yarn.scheduler.capacity.root.{0}.maximum-am-resource-percent=1""".format(llap_qu
     super(HDP25StackAdvisor, self).recommendRangerKMSConfigurations(configurations, clusterData, services, hosts)
 
     security_enabled = self.isSecurityEnabled(services)
-    required_services = [{'service' : 'RANGER', 'config-type': 'ranger-env', 'property-name': 'ranger_user', 'proxy-category': ['hosts', 'users', 'groups']}]
+    required_services = [{'service' : 'RANGER', 'config-type': 'ranger-env', 'property-name': 'ranger_user', 'proxy-category': ['hosts', 'users', 'groups']},
+    {'service' : 'SPARK2', 'config-type': 'livy2-env', 'property-name': 'livy2_user', 'proxy-category': ['hosts', 'users', 'groups']}]
 
     if security_enabled:
       # recommendations for kms proxy related properties


[44/50] [abbrv] ambari git commit: AMBARI-22139 - CURRENT cluster Shows Upgrade If Component Didn't Report Version (jonathanhurley)

Posted by jl...@apache.org.
AMBARI-22139 - CURRENT cluster Shows Upgrade If Component Didn't Report Version (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8b83a0a5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8b83a0a5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8b83a0a5

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 8b83a0a538358e54c5aa7f3c4eccc4a458296f9a
Parents: a9d2698
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Oct 5 16:06:06 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Fri Oct 6 10:38:29 2017 -0400

----------------------------------------------------------------------
 .../python/resource_management/TestScript.py    | 26 ++++++++-
 .../libraries/script/script.py                  | 22 ++++++--
 .../0.4.0/package/scripts/ranger_admin.py       | 13 ++---
 .../configs/ranger_admin_default.json           | 55 ++++++++++++++++++++
 .../src/test/python/stacks/utils/RMFTestCase.py |  9 ++--
 5 files changed, 110 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-agent/src/test/python/resource_management/TestScript.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestScript.py b/ambari-agent/src/test/python/resource_management/TestScript.py
index 75726d6..79d0598 100644
--- a/ambari-agent/src/test/python/resource_management/TestScript.py
+++ b/ambari-agent/src/test/python/resource_management/TestScript.py
@@ -21,9 +21,9 @@ import StringIO
 import sys, pprint
 from resource_management.libraries.script import Script
 from resource_management.core.environment import Environment
+from resource_management.core.logger import Logger
 from mock.mock import patch, MagicMock
 from stacks.utils.RMFTestCase import *
-import logging
 
 class TestScript(RMFTestCase):
 
@@ -110,7 +110,7 @@ class TestScript(RMFTestCase):
   @patch("__builtin__.open")
   def test_status_commands_clear_structured_out(self, open_mock):
     """
-    Tests that status commands will clear and stored structured output from prior status commands.
+    Tests that status commands will clear any stored structured output from prior status commands.
     :param open_mock: 
     :return: 
     """
@@ -141,6 +141,28 @@ class TestScript(RMFTestCase):
     self.assertTrue(open_mock.called)
     self.assertEquals({}, Script.structuredOut)
 
+
+  @patch.object(Logger, "error", new = MagicMock())
+  @patch.object(Script, "put_structured_out")
+  @patch("resource_management.libraries.functions.version_select_util.get_component_version_from_symlink", new = MagicMock(return_value=None))
+  @patch("resource_management.libraries.functions.stack_select.get_package_name", new = MagicMock(return_value="foo-package"))
+  @patch("resource_management.libraries.functions.stack_select.unsafe_get_stack_versions", new = MagicMock(return_value=("",0,["2.6.0.0-1234"])))
+  def test_save_version_structured_out_stack_select(self, pso_mock):
+    """
+    Tests that when writing out the version of the component to the structure output,
+    if all else fails, we'll invoke the stack-select tool to see if there are any versions
+    reported.
+    :param pso_mock:
+    :return:
+    """
+    script = Script()
+    script.stroutfile = ''
+    script.save_component_version_to_structured_out("start")
+
+    self.assertEqual(pso_mock.call_count, 1)
+    self.assertEquals(pso_mock.call_args[0][0], {'version':'2.6.0.0-1234'})
+
+
   def tearDown(self):
     # enable stdout
     sys.stdout = sys.__stdout__

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index e612638..d5b4469 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -47,7 +47,7 @@ from resource_management.core.environment import Environment
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import Fail, ClientComponentHasNoStatus, ComponentIsNotRunning
 from resource_management.core.resources.packaging import Package
-from resource_management.libraries.functions.version_select_util import get_component_version_from_symlink
+from resource_management.libraries.functions import version_select_util
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions import stack_tools
@@ -212,6 +212,12 @@ class Script(object):
     Saves the version of the component for this command to the structured out file. If the
     command is an install command and the repository is trusted, then it will use the version of
     the repository. Otherwise, it will consult the stack-select tool to read the symlink version.
+
+    Under rare circumstances, a component may have a bug which prevents it from reporting a
+    version back after being installed. This is most likely due to the stack-select tool not being
+    invoked by the package's installer. In these rare cases, we try to see if the component
+    should have reported a version and we try to fall back to the "<stack-select> versions" command.
+
     :param command_name: command name
     :return: None
     """
@@ -240,7 +246,17 @@ class Script(object):
     if stack_select_package_name and stack_name:
       # only query for the component version from stack-select if we can't trust the repository yet
       if component_version is None:
-        component_version = get_component_version_from_symlink(stack_name, stack_select_package_name)
+        component_version = version_select_util.get_component_version_from_symlink(stack_name, stack_select_package_name)
+
+      # last-ditch effort - should cover the edge case where the package failed to set up its
+      # link and we have to try to see if <stack-select> can help
+      if component_version is None:
+        output, code, versions = stack_select.unsafe_get_stack_versions()
+        if len(versions) == 1:
+          component_version = versions[0]
+          Logger.error("The '{0}' component did not advertise a version. This may indicate a problem with the component packaging. " \
+                         "However, the stack-select tool was able to report a single version installed ({1}). " \
+                         "This is the version that will be reported.".format(stack_select_package_name, component_version))
 
       if component_version:
         self.put_structured_out({"version": component_version})
@@ -252,7 +268,7 @@ class Script(object):
           self.put_structured_out({"repository_version_id": repo_version_id})
       else:
         if not self.is_hook():
-          Logger.error("Component '{0}' did not advertise a version. This may indicate a problem with the component packaging.".format(stack_select_package_name))
+          Logger.error("The '{0}' component did not advertise a version. This may indicate a problem with the component packaging.".format(stack_select_package_name))
 
 
   def should_expose_component_version(self, command_name):
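
Condensed to its decision logic, the fallback added above is: trust the symlink version when present; otherwise accept the stack-select output only if it reports exactly one installed version. A simplified sketch (not the actual Script code):

    def resolve_component_version(symlink_version, stack_select_versions):
        if symlink_version is not None:
            return symlink_version
        if len(stack_select_versions) == 1:
            # Exactly one installed version reported: safe to use it.
            return stack_select_versions[0]
        return None

    print(resolve_component_version(None, ["2.6.0.0-1234"]))  # -> 2.6.0.0-1234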

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
index f779c18..848b137 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
@@ -20,7 +20,6 @@ limitations under the License.
 from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.script import Script
 from resource_management.core.resources.system import Execute, File
 from resource_management.core.exceptions import ComponentIsNotRunning
@@ -28,10 +27,12 @@ from resource_management.libraries.functions.format import format
 from resource_management.core.logger import Logger
 from resource_management.core import shell
 from ranger_service import ranger_service
-from setup_ranger_xml import setup_ranger_audit_solr, setup_ranger_admin_passwd_change, update_password_configs
 from resource_management.libraries.functions import solr_cloud_util
-from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING, UPGRADE_TYPE_ROLLING
+from ambari_commons.constants import UPGRADE_TYPE_NON_ROLLING
 from resource_management.libraries.functions.constants import Direction
+
+import setup_ranger_xml
+
 import os, errno
 
 class RangerAdmin(Script):
@@ -93,9 +94,9 @@ class RangerAdmin(Script):
 
     if params.stack_supports_infra_client and params.audit_solr_enabled and params.is_solrCloud_enabled:
       solr_cloud_util.setup_solr_client(params.config, custom_log4j = params.custom_log4j)
-      setup_ranger_audit_solr()
+      setup_ranger_xml.setup_ranger_audit_solr()
 
-    update_password_configs()
+    setup_ranger_xml.update_password_configs()
     ranger_service('ranger_admin')
 
 
@@ -142,7 +143,7 @@ class RangerAdmin(Script):
         setup_java_patch()
 
       if params.stack_supports_ranger_admin_password_change:
-        setup_ranger_admin_passwd_change()
+        setup_ranger_xml.setup_ranger_admin_passwd_change()
 
   def set_ru_rangeradmin_in_progress(self, upgrade_marker_file):
     config_dir = os.path.dirname(upgrade_marker_file)

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json b/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json
index b360c07..2e66c62 100644
--- a/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json
+++ b/ambari-server/src/test/python/common-services/configs/ranger_admin_default.json
@@ -64,6 +64,61 @@
       "db_host": "localhost",
       "xa_ldap_groupRoleAttribute": "\"cn\""
     },
+    "ranger-ugsync-site": {
+      "ranger.usersync.ldap.binddn": "",
+      "ranger.usersync.policymgr.username": "rangerusersync",
+      "ranger.usersync.policymanager.mockrun": "false",
+      "ranger.usersync.group.searchbase": "",
+      "ranger.usersync.ldap.bindalias": "testldapalias",
+      "ranger.usersync.truststore.file": "/usr/hdp/current/ranger-usersync/conf/mytruststore.jks",
+      "ranger.usersync.port": "5151",
+      "ranger.usersync.pagedresultssize": "500",
+      "ranger.usersync.group.memberattributename": "",
+      "ranger.usersync.kerberos.principal": "",
+      "ranger.usersync.source.impl.class": "org.apache.ranger.unixusersync.process.UnixUserGroupBuilder",
+      "ranger.usersync.ldap.referral": "ignore",
+      "ranger.usersync.group.searchfilter": "",
+      "ranger.usersync.ldap.user.objectclass": "person",
+      "ranger.usersync.logdir": "{{usersync_log_dir}}",
+      "ranger.usersync.ldap.user.searchfilter": "",
+      "ranger.usersync.ldap.groupname.caseconversion": "none",
+      "ranger.usersync.ldap.ldapbindpassword": "",
+      "ranger.usersync.unix.minUserId": "500",
+      "ranger.usersync.policymanager.maxrecordsperapicall": "1000",
+      "ranger.usersync.group.nameattribute": "",
+      "ranger.usersync.policymgr.alias": "ranger.usersync.policymgr.password",
+      "ranger.usersync.keystore.file": "/usr/hdp/current/ranger-usersync/conf/unixauthservice.jks",
+      "ranger.usersync.user.searchenabled": "false",
+      "ranger.usersync.group.usermapsyncenabled": "true",
+      "ranger.usersync.ldap.bindkeystore": "",
+      "ranger.usersync.ldap.user.groupnameattribute": "memberof, ismemberof",
+      "ranger.usersync.kerberos.keytab": "",
+      "ranger.usersync.passwordvalidator.path": "./native/credValidator.uexe",
+      "ranger.usersync.group.objectclass": "",
+      "ranger.usersync.ldap.user.searchscope": "sub",
+      "ranger.usersync.unix.password.file": "/etc/passwd",
+      "ranger.usersync.ldap.user.nameattribute": "",
+      "ranger.usersync.pagedresultsenabled": "true",
+      "ranger.usersync.policymanager.baseURL": "{{ranger_external_url}}",
+      "ranger.usersync.group.search.first.enabled": "false",
+      "ranger.usersync.group.searchenabled": "false",
+      "ranger.usersync.sink.impl.class": "org.apache.ranger.unixusersync.process.PolicyMgrUserGroupBuilder",
+      "ranger.usersync.ssl": "true",
+      "ranger.usersync.ldap.url": "",
+      "ranger.usersync.ldap.searchBase": "dc=hadoop,dc=apache,dc=org",
+      "ranger.usersync.policymgr.keystore": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+      "ranger.usersync.ldap.user.searchbase": "",
+      "ranger.usersync.ldap.username.caseconversion": "none",
+      "ranger.usersync.credstore.filename": "/usr/hdp/current/ranger-usersync/conf/ugsync.jceks",
+      "ranger.usersync.keystore.password": "UnIx529p",
+      "ranger.usersync.unix.group.file": "/etc/group",
+      "ranger.usersync.filesource.file": "/tmp/usergroup.txt",
+      "ranger.usersync.group.searchscope": "",
+      "ranger.usersync.truststore.password": "changeit",
+      "ranger.usersync.enabled": "true",
+      "ranger.usersync.sleeptimeinmillisbetweensynccycle": "60000",
+      "ranger.usersync.filesource.text.delimiter": ","
+    },
     "ranger-site": {
       "http.enabled": "true",
       "http.service.port": "6080",

http://git-wip-us.apache.org/repos/asf/ambari/blob/8b83a0a5/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index d98e0b1..bff8642 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -154,11 +154,12 @@ class RMFTestCase(TestCase):
                     with patch('resource_management.libraries.functions.stack_select.is_package_supported', return_value=True):
                       with patch('resource_management.libraries.functions.stack_select.get_supported_packages', return_value=MagicMock()):
                         with patch.object(os, "environ", new=os_env) as mocks_dict['environ']:
-                          if not try_install:
-                            with patch.object(Script, 'install_packages') as install_mock_value:
+                          with patch('resource_management.libraries.functions.stack_select.unsafe_get_stack_versions', return_value = (("",0,[]))):
+                            if not try_install:
+                              with patch.object(Script, 'install_packages') as install_mock_value:
+                                method(RMFTestCase.env, *command_args)
+                            else:
                               method(RMFTestCase.env, *command_args)
-                          else:
-                            method(RMFTestCase.env, *command_args)
 
     sys.path.remove(scriptsdir)
 

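A side note on the hunk above: each extra stub deepens the with patch(...) pyramid by one level. Below is a minimal sketch of a flatter alternative, assuming Python 3's contextlib.ExitStack (the contextlib2 package backports it to the Python 2 runtime these tests target); the patch targets are the ones visible in the diff, everything else is illustrative:

from contextlib import ExitStack  # contextlib2 on Python 2
from mock import MagicMock, patch  # unittest.mock on Python 3

def enter_stack_select_patches(stack):
    # Enter every stub through one ExitStack instead of nesting
    # 'with' blocks; all patches unwind together when the stack exits.
    stack.enter_context(patch(
        'resource_management.libraries.functions.stack_select.is_package_supported',
        return_value=True))
    stack.enter_context(patch(
        'resource_management.libraries.functions.stack_select.get_supported_packages',
        return_value=MagicMock()))
    stack.enter_context(patch(
        'resource_management.libraries.functions.stack_select.unsafe_get_stack_versions',
        return_value=("", 0, [])))

# usage inside the harness (sketch):
# with ExitStack() as stack:
#     enter_stack_select_patches(stack)
#     method(RMFTestCase.env, *command_args)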

[32/50] [abbrv] ambari git commit: AMBARI-22146. New stack versions shown as 'Not installed' on 0 hosts (alexantonenko)

Posted by jl...@apache.org.
AMBARI-22146. New stack versions shown as 'Not installed' on 0 hosts (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/9adfcdcb
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/9adfcdcb
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/9adfcdcb

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 9adfcdcbd9b0f01b1576254bc209b1ce64b7d63f
Parents: d5dd193
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Thu Oct 5 21:56:31 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Thu Oct 5 21:56:31 2017 +0300

----------------------------------------------------------------------
 .../app/models/stack_version/repository_version.js      |  2 +-
 .../models/stack_version/repository_version_test.js     | 12 +++++++++---
 2 files changed, 10 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/9adfcdcb/ambari-web/app/models/stack_version/repository_version.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/stack_version/repository_version.js b/ambari-web/app/models/stack_version/repository_version.js
index 51b9ab8..d40b4f2 100644
--- a/ambari-web/app/models/stack_version/repository_version.js
+++ b/ambari-web/app/models/stack_version/repository_version.js
@@ -64,7 +64,7 @@ App.RepositoryVersion = DS.Model.extend({
    * @type {Array}
    */
   notInstalledHosts: function () {
-    return Array.isArray(this.get('stackVersion.notInstalledHosts'))
+    return this.get('stackVersion.notInstalledHosts').length || this.get('stackVersion.installedHosts').length || this.get('stackVersion.currentHosts').length
           ? this.get('stackVersion.notInstalledHosts')
           : App.get('allHostNames');
   }.property('stackVersion.notInstalledHosts'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/9adfcdcb/ambari-web/test/models/stack_version/repository_version_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/models/stack_version/repository_version_test.js b/ambari-web/test/models/stack_version/repository_version_test.js
index bbb98a3..2bd30de 100644
--- a/ambari-web/test/models/stack_version/repository_version_test.js
+++ b/ambari-web/test/models/stack_version/repository_version_test.js
@@ -51,15 +51,21 @@ describe('App.RepositoryVersion', function () {
       App.get.restore();
     });
 
-    it("stackVersion is null", function() {
-      model.set('stackVersion', null);
+    it("all states empty", function() {
+      model.set('stackVersion', Em.Object.create({
+        installedHosts: [],
+        notInstalledHosts: [],
+        currentHosts: []
+      }));
       model.propertyDidChange('notInstalledHosts');
       expect(model.get('notInstalledHosts')).to.eql(['host1']);
     });
 
     it("stackVersion has notInstalledHosts array", function() {
       model.set('stackVersion', Em.Object.create({
-        notInstalledHosts: ['host2']
+        installedHosts: [],
+        notInstalledHosts: ['host2'],
+        currentHosts: []
       }));
       model.propertyDidChange('notInstalledHosts');
       expect(model.get('notInstalledHosts')).to.eql(['host2']);
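
A restatement of the rule the reworked property and its tests encode, as a minimal Python sketch (dict access stands in for the Ember getters; names are illustrative):

def not_installed_hosts(stack_version, all_host_names):
    # When every host-state list is empty the repository version has not
    # been mapped to hosts yet, so every known host counts as not installed.
    if (stack_version['notInstalledHosts']
            or stack_version['installedHosts']
            or stack_version['currentHosts']):
        return stack_version['notInstalledHosts']
    return all_host_names

assert not_installed_hosts(
    {'notInstalledHosts': [], 'installedHosts': [], 'currentHosts': []},
    ['host1']) == ['host1']
assert not_installed_hosts(
    {'notInstalledHosts': ['host2'], 'installedHosts': [], 'currentHosts': []},
    ['host1']) == ['host2']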


[23/50] [abbrv] ambari git commit: AMBARI-22134. UI Fixes for Maint Repo Version Display (alexantonenko)

Posted by jl...@apache.org.
AMBARI-22134. UI Fixes for Maint Repo Version Display (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d86f7649
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d86f7649
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d86f7649

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: d86f76497e20b17c7ca381b3941d3fd0f82aeebf
Parents: d1ba229
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Wed Oct 4 21:25:08 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Wed Oct 4 21:25:08 2017 +0300

----------------------------------------------------------------------
 .../app/controllers/main/admin/stack_and_upgrade_controller.js   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d86f7649/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 90a4db9..29e0687 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -2055,7 +2055,9 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
    */
   getServicesToBeReverted: function(version, currentStack) {
     return version.get('stackServices').filter(function(_service) {
-      return (App.Service.find(_service.get('name')).get('isLoaded') && _service.get('isAvailable'));
+      var originalService = App.Service.find(_service.get('name'));
+      var isOriginal = originalService.get('desiredRepositoryVersionId') === version.get('id');
+      return (originalService.get('isLoaded') && _service.get('isAvailable')) && isOriginal;
     }).map(function(_service) {
       return {
         displayName: _service.get('displayName'),
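
The guard added above keeps a service out of the revert list unless its desired repository version is the one being reverted. The same logic, flattened into a hedged Python sketch (a plain dict lookup stands in for App.Service.find and the isLoaded check):

def services_to_be_reverted(version, stack_services, installed_services):
    result = []
    for svc in stack_services:
        installed = installed_services.get(svc['name'])  # ~ App.Service.find
        if not installed or not svc['isAvailable']:
            continue
        # only services still on the reverted version count as "original"
        if installed['desiredRepositoryVersionId'] == version['id']:
            result.append({'displayName': svc['displayName']})
    return result

assert services_to_be_reverted(
    {'id': 7},
    [{'name': 'HDFS', 'displayName': 'HDFS', 'isAvailable': True}],
    {'HDFS': {'desiredRepositoryVersionId': 7}}) == [{'displayName': 'HDFS'}]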


[43/50] [abbrv] ambari git commit: AMBARI-22161. Unable to ignore service check failures during Upgrade (alexantonenko)

Posted by jl...@apache.org.
AMBARI-22161. Unable to ignore service check failures during Upgrade (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a9d26986
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a9d26986
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a9d26986

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: a9d26986d615cee43de6bddca12fd0a23b6526b8
Parents: 388cb41
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Fri Oct 6 17:10:48 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Fri Oct 6 17:10:48 2017 +0300

----------------------------------------------------------------------
 .../admin/stack_upgrade/upgrade_wizard_view.js  |  2 +-
 .../stack_upgrade/upgrade_wizard_view_test.js   | 28 +++++++++++++++-----
 2 files changed, 23 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a9d26986/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
index e1689c2..87e2adf 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
@@ -126,7 +126,7 @@ App.upgradeWizardView = Em.View.extend({
     var associatedVersion = this.get('controller.upgradeData.Upgrade.associated_version');
     var version = associatedVersion && App.RepositoryVersion.find().findProperty('repositoryVersion', associatedVersion);
     var isPatchOrMaint = version && ( version.get('isPatch') || version.get('isMaint') );
-    return failedItem && failedItem.get('skippable') && !isPatchOrMaint;
+    return failedItem && failedItem.get('skippable') && !(this.get('isFinalizeItem') && isPatchOrMaint);
   }.property('failedItem'),
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/a9d26986/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
index 0107975..a739838 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
@@ -919,20 +919,36 @@ describe('App.upgradeWizardView', function () {
     beforeEach(function () {
       view.reopen({'failedItem': Em.Object.create({skippable: true}) });
       view.set('controller.upgradeData.Upgrade', {associated_version: '2.1.1'});
+      var findResult = [Em.Object.create({repositoryVersion: '2.1.1', isPatch: true})];
+      sinon.stub(App.RepositoryVersion, 'find', function(){
+        return findResult;
+      });
+    });
+
+    afterEach(function () {
+      App.RepositoryVersion.find.restore();
     })
     it("Should return true if can not find upgrade", function () {
       view.propertyDidChange('canSkipFailedItem');
-      expect(view.get('canSkipFailedItem')).to.be.true
+      expect(view.get('canSkipFailedItem')).to.be.true;
     });
 
-    it("Should return false if upgrade is patch or maint", function () {
-      var findResult = [Em.Object.create({repositoryVersion: '2.1.1', isPatch: true})];
-      sinon.stub(App.RepositoryVersion, 'find', function(){
-        return findResult;
+    it("Should return false if upgrade is patch or maint and item is final", function () {
+      view.reopen({
+        isFinalizeItem: true
       });
       view.propertyDidChange('canSkipFailedItem');
       expect(view.get('canSkipFailedItem')).to.be.false;
-      App.RepositoryVersion.find.restore();
+    });
+
+    it("Should return true if upgrade is patch or maint and item is not final", function () {
+
+      view.reopen({
+        isFinalizeItem: false
+      });
+      view.propertyDidChange('canSkipFailedItem');
+
+      expect(view.get('canSkipFailedItem')).to.be.true;
     });
 
   });
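
The predicate these tests pin down, condensed to its boolean core in a Python sketch (the Ember property plumbing is omitted; argument names are illustrative):

def can_skip_failed_item(failed_item_skippable, is_finalize_item, is_patch_or_maint):
    # A skippable failure can be skipped, except at the finalize step of a
    # patch or maintenance upgrade.
    return failed_item_skippable and not (is_finalize_item and is_patch_or_maint)

assert can_skip_failed_item(True, True, True) is False   # finalize + patch/maint
assert can_skip_failed_item(True, False, True) is True   # not the finalize item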


[05/50] [abbrv] ambari git commit: AMBARI-22095 Make hooks stack agnostic (dsen)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index 7e12962..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
- 
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
deleted file mode 100644
index 0f7a55c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys, os
-from string import join
-import ConfigParser
-
-
-DEFAULT_RACK = "/default-rack"
-DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
-SECTION_NAME = "network_topology"
-
-class TopologyScript():
-
-  def load_rack_map(self):
-    try:
-      #RACK_MAP contains both host name vs rack and ip vs rack mappings
-      mappings = ConfigParser.ConfigParser()
-      mappings.read(DATA_FILE_NAME)
-      return dict(mappings.items(SECTION_NAME))
-    except ConfigParser.NoSectionError:
-      return {}
-
-  def get_racks(self, rack_map, args):
-    if len(args) == 1:
-      return DEFAULT_RACK
-    else:
-      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
-
-  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
-    #try looking up by hostname
-    rack = rack_map.get(hostname_or_ip)
-    if rack is not None:
-      return rack
-    #try looking up by ip
-    rack = rack_map.get(self.extract_ip(hostname_or_ip))
-    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
-    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
-
-  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
-  def extract_ip(self, container_string):
-    return container_string.split("/")[0].split(":")[0]
-
-  def execute(self, args):
-    rack_map = self.load_rack_map()
-    rack = self.get_racks(rack_map, args)
-    print rack
-
-if __name__ == "__main__":
-  TopologyScript().execute(sys.argv)
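
For context on the file the deleted script parses: load_rack_map() reads topology_mappings.data as ConfigParser INI with the single [network_topology] section named above, mapping host names and IPs to rack paths. A small Python 2 sketch (matching the script's runtime; the hosts and racks are illustrative):

import ConfigParser
import StringIO

data = """[network_topology]
host1.example.com=/rack-01
10.0.0.12=/rack-01
"""
parser = ConfigParser.ConfigParser()
parser.readfp(StringIO.StringIO(data))
# the script builds exactly this dict and answers lookups from it
print(dict(parser.items('network_topology')))

Hadoop invokes the script with one or more host names or IPs as arguments and reads the space-joined rack paths it prints to stdout.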

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
deleted file mode 100644
index f7705c4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink, setup_unlimited_key_jce_policy
-
-class BeforeStartHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-    create_topology_script_and_mapping()
-    setup_unlimited_key_jce_policy()
-
-if __name__ == "__main__":
-  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
deleted file mode 100644
index 8555fea..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,364 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-artifact_dir = tmp_dir + "/AMBARI-artifacts"
-
-# Global flag enabling or disabling the sysprep feature
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
-# This is required if tarballs are going to be copied to HDFS, so set to False
-sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
-
-# Whether to skip setting up the unlimited key JCE policy
-sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-hadoop_conf_dir = "/etc/hadoop/conf"
-component_list = default("/localComponents", [])
-
-hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
-
-hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
-
-# hadoop default params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_home = stack_select.get_hadoop_dir("home")
-create_lib_snappy_symlinks = False
-
-  
-current_service = config['serviceName']
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-ambari_server_resources_url = default("/hostLevelParams/jdk_location", None)
-if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
-  ambari_server_resources_url = ambari_server_resources_url[:-1]
-
-# Unlimited key JCE policy params
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-unlimited_key_jce_required = default("/hostLevelParams/unlimited_key_jce_required", False)
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = default("/hostLevelParams/java_home", None)
-java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java"
-
-#users and groups
-has_hadoop_env = 'hadoop-env' in config['configurations']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-cluster_name = config["clusterName"]
-set_instanceId = "false"
-if 'cluster-env' in config['configurations'] and \
-        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
-  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
-  set_instanceId = "true"
-else:
-  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
-
-has_namenode = not len(namenode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_hcat_server_host = not len(hcat_server_hosts) == 0
-has_hive_server_host = not len(hive_server_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_metric_collector = not len(ams_collector_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-metric_collector_port = None
-if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_external_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
-    if metric_collector_web_address.find(':') != -1:
-      metric_collector_port = metric_collector_web_address.split(':')[1]
-    else:
-      metric_collector_port = '6188'
-  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-    metric_collector_protocol = 'https'
-  else:
-    metric_collector_protocol = 'http'
-  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-
-  pass
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
-host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
-host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
-
-# Cluster Zookeeper quorum
-zookeeper_quorum = None
-if has_zk_host:
-  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
-    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
-  else:
-    zookeeper_clientPort = '2181'
-  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
-  # last port config
-  zookeeper_quorum += ':' + zookeeper_clientPort
-
-#hadoop params
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hbase_tmp_dir = "/tmp/hbase-hbase"
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar")
-mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar")
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
-  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
-else:
-  rca_enabled = False
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-#log4j.properties
-if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
-  log4j_props = config['configurations']['hdfs-log4j']['content']
-  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
-    log4j_props += config['configurations']['yarn-log4j']['content']
-else:
-  log4j_props = None
-
-refresh_topology = False
-command_params = config["commandParams"] if "commandParams" in config else None
-if command_params is not None:
-  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
-
-ambari_java_home = default("/commandParams/ambari_java_home", None)
-ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
-ambari_jce_name = default("/commandParams/ambari_jce_name", None)
-  
-ambari_libs_dir = "/var/lib/ambari-agent/lib"
-is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-#host info
-all_hosts = default("/clusterHostInfo/all_hosts", [])
-all_racks = default("/clusterHostInfo/all_racks", [])
-all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-
-#topology files
-net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
-net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
-net_topology_mapping_data_file_name = 'topology_mappings.data'
-net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
-
-#Added logic to create /tmp and /user directory for HCFS stack.  
-has_core_site = 'core-site' in config['configurations']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-kinit_path_local = get_kinit_path()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-hdfs_site = config['configurations']['hdfs-site']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-
-##### Namenode RPC ports - metrics config section start #####
-
-# Figure out the rpc ports for current namenode
-nn_rpc_client_port = None
-nn_rpc_dn_port = None
-nn_rpc_healthcheck_port = None
-
-namenode_id = None
-namenode_rpc = None
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-
-dfs_ha_namemodes_ids_list = []
-other_namenode_id = None
-
-if dfs_ha_namenode_ids:
- dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
- dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
- if dfs_ha_namenode_ids_array_len > 1:
-   dfs_ha_enabled = True
-
-if dfs_ha_enabled:
- for nn_id in dfs_ha_namemodes_ids_list:
-   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-   if hostname in nn_host:
-     namenode_id = nn_id
-     namenode_rpc = nn_host
-   pass
- pass
-else:
-  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', default_fs)
-
-# if HDFS is not installed in the cluster, then don't try to access namenode_rpc
-if "core-site" in config['configurations'] and namenode_rpc:
-  port_str = namenode_rpc.split(':')[-1].strip()
-  try:
-    nn_rpc_client_port = int(port_str)
-  except ValueError:
-    nn_rpc_client_port = None
-
-if namenode_rpc:
- nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
-
-if dfs_ha_enabled:
- dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
-else:
- dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
-
-if dfs_service_rpc_address:
- nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
-
-if dfs_lifeline_rpc_address:
- nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
-
-is_nn_client_port_configured = False if nn_rpc_client_port is None else True
-is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True
-is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True
-
-##### end #####
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
-)
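
The HdfsResource = functools.partial(...) idiom that closes the file is worth a standalone sketch: bind the keyword arguments every call site shares once, so callers pass only what varies. The stand-in function below is illustrative, not the real resource:

import functools

def hdfs_resource(path, user=None, dfs_type=None, **kwargs):
    # stand-in: just report what was bound versus passed per call
    print('%s (user=%s, dfs_type=%s): %s' % (path, user, dfs_type, kwargs))

HdfsResource = functools.partial(hdfs_resource, user='hdfs', dfs_type='HDFS')
HdfsResource('/tmp', type='directory', action='create_on_execute')
HdfsResource('/user/ambari-qa', type='directory', mode=0o770)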

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
deleted file mode 100644
index 548f051..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management.core.resources import File
-from resource_management.core.source import StaticFile, Template
-from resource_management.libraries.functions import format
-
-
-def create_topology_mapping():
-  import params
-
-  File(params.net_topology_mapping_data_file_path,
-       content=Template("topology_mappings.data.j2"),
-       owner=params.hdfs_user,
-       group=params.user_group,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script():
-  import params
-
-  File(params.net_topology_script_file_path,
-       content=StaticFile('topology_script.py'),
-       mode=0755,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script_and_mapping():
-  import params
-  if params.has_hadoop_env:
-    create_topology_mapping()
-    create_topology_script()
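
Both File resources above are guarded with only_if="test -d {net_topology_script_dir}", so they become no-ops when the Hadoop conf directory is absent. A hedged sketch of that guard pattern outside the resource framework (the function below is illustrative):

import subprocess

def guarded_write(path, content, only_if=None):
    # mirror the only_if semantics: run the shell test, skip on non-zero
    if only_if and subprocess.call(only_if, shell=True) != 0:
        return False
    with open(path, 'w') as f:
        f.write(content)
    return True

# e.g. guarded_write('/etc/hadoop/conf/topology_mappings.data', data,
#                    only_if='test -d /etc/hadoop/conf')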

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100644
index 3f9a863..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,249 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from resource_management.core.resources.jcepolicyinfo import JcePolicyInfo
-
-from resource_management import *
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  Execute(("setenforce","0"),
-          only_if="test -f /selinux/enforce",
-          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
-          sudo=True,
-  )
-
-  #directories
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    Directory(params.hdfs_log_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              mode=0775,
-              cd_access='a',
-    )
-    if params.has_namenode:
-      Directory(params.hadoop_pid_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group='root',
-              cd_access='a',
-      )
-    Directory(params.hadoop_tmp_dir,
-              create_parents = True,
-              owner=params.hdfs_user,
-              cd_access='a',
-              )
-  #files
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-      
-    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
-    if params.sysprep_skip_copy_fast_jar_hdfs:
-      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
-    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-      # for source-code of jar goto contrib/fast-hdfs-resource
-      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
-           mode=0644,
-           content=StaticFile("fast-hdfs-resource.jar")
-      )
-      
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-           owner=tc_owner,
-           content=Template('commons-logging.properties.j2')
-      )
-
-      health_check_template_name = "health_check"
-      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
-           owner=tc_owner,
-           content=Template(health_check_template_name + ".j2")
-      )
-
-      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-      if (params.log4j_props != None):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-             content=params.log4j_props
-        )
-      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-        )
-
-      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-           owner=params.hdfs_user,
-           group=params.user_group,
-           content=InlineTemplate(params.hadoop_metrics2_properties_content)
-      )
-
-    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
-       create_dirs()
-
-    create_microsoft_r_dir()
-
-
-def setup_configs():
-  """
-  Creates configs for services HDFS mapred
-  """
-  import params
-
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    if os.path.exists(params.hadoop_conf_dir):
-      File(params.task_log4j_properties_location,
-           content=StaticFile("task-log4j.properties"),
-           mode=0755
-      )
-
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-           owner=params.hdfs_user,
-           group=params.user_group
-      )
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-      File(os.path.join(params.hadoop_conf_dir, 'masters'),
-                owner=params.hdfs_user,
-                group=params.user_group
-      )
-
-def create_javahome_symlink():
-  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Directory("/usr/jdk64/",
-         create_parents = True,
-    )
-    Link("/usr/jdk/jdk1.6.0_31",
-         to="/usr/jdk64/jdk1.6.0_31",
-    )
-
-def create_dirs():
-   import params
-   params.HdfsResource(params.hdfs_tmp_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.hdfs_user,
-                       mode=0777
-   )
-   params.HdfsResource(params.smoke_hdfs_user_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
-   )
-   params.HdfsResource(None,
-                      action="execute"
-   )
-
-def create_microsoft_r_dir():
-  import params
-  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
-    directory = '/user/RevoShare'
-    try:
-      params.HdfsResource(directory,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.hdfs_user,
-                          mode=0777)
-      params.HdfsResource(None, action="execute")
-    except Exception as exception:
-      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
-
-def setup_unlimited_key_jce_policy():
-  """
-  Sets up the unlimited key JCE policy if needed. (sets up ambari JCE as well if ambari and the  stack use different JDK)
-  """
-  import params
-  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
-  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
-    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
-
-def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
-  """
-  Sets up the unlimited key JCE policy if needed.
-
-  The following criteria must be met:
-
-    * The cluster has not been previously prepared (sys preped) - cluster-env/sysprep_skip_setup_jce = False
-    * Ambari is managing the host's JVM - /hostLevelParams/jdk_name is set
-    * Either security is enabled OR a service requires it - /hostLevelParams/unlimited_key_jce_required = True
-    * The unlimited key JCE policy has not already been installed
-
-  If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs
-
-    1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the
-        Ambari agent's temporary directory
-    2. The existing JCE policy JAR files are deleted
-    3. The downloaded ZIP file is unzipped into the proper JCE policy directory
-
-  :return: None
-  """
-  import params
-
-  if params.sysprep_skip_setup_jce:
-    Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
-
-  elif not custom_jdk_name:
-    Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
-
-  elif not params.unlimited_key_jce_required:
-    Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
-
-  else:
-    jcePolicyInfo = JcePolicyInfo(custom_java_home)
-
-    if jcePolicyInfo.is_unlimited_key_jce_policy():
-      Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
-
-    elif custom_jce_name is None:
-      raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
-
-    else:
-      Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
-
-      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
-      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
-      java_security_dir = format("{custom_java_home}/jre/lib/security")
-
-      Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
-      Directory(params.artifact_dir, create_parents=True)
-      File(jce_zip_target, content=DownloadSource(jce_zip_source))
-
-      Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir))
-      File(format("{java_security_dir}/US_export_policy.jar"), action="delete")
-      File(format("{java_security_dir}/local_policy.jar"), action="delete")
-
-      Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir))
-      extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir)
-      Execute(extract_cmd,
-              only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"),
-              path=['/bin/', '/usr/bin'],
-              sudo=True
-              )
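
The control flow of __setup_unlimited_key_jce_policy() reduces to a small decision ladder; restated as a Python sketch (the return labels are illustrative, not Ambari API):

def jce_policy_action(sysprep_skip, jdk_managed, jce_required,
                      already_unlimited, jce_zip_name):
    if sysprep_skip:
        return 'skip: host is sys prepped'
    if not jdk_managed:
        return 'skip: JVM is not managed by Ambari'
    if not jce_required:
        return 'skip: unlimited key JCE policy not required'
    if already_unlimited:
        return 'skip: already installed'
    if jce_zip_name is None:
        raise RuntimeError('JCE policy required but no policy zip specified')
    return 'install: download zip, delete old policy JARs, unzip into JRE'

assert jce_policy_action(False, True, True, False, 'jce8.zip').startswith('install')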

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100644
index 2197ba5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100644
index 1adba80..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
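
For reference, the template above can be exercised standalone with the jinja2 package (a sketch; Ambari renders it through its own Template resource, and the loop is condensed to one line here):

from jinja2 import Template

tmpl = Template('{% for host in hdfs_exclude_file %}{{host}}\n{% endfor %}')
print(tmpl.render(hdfs_exclude_file=['host1.example.com', 'host2.example.com']))
# host1.example.com
# host2.example.com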

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100644
index 2cd9aa8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,107 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name={{hostname}}
-*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
-*.sink.timeline.protocol={{metric_collector_protocol}}
-*.sink.timeline.port={{metric_collector_port}}
-*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
-*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
-
-# HTTPS properties
-*.sink.timeline.truststore.path = {{metric_truststore_path}}
-*.sink.timeline.truststore.type = {{metric_truststore_type}}
-*.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% if is_nn_client_port_configured %}
-# Namenode rpc ports customization
-namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
-{% endif %}
-{% if is_nn_dn_port_configured %}
-namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
-{% endif %}
-{% if is_nn_healthcheck_port_configured %}
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
-{% endif %}
-
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index 0a03d17..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-# Run all checks
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
deleted file mode 100644
index 15034d6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-    #
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-[network_topology]
-{% for host in all_hosts %}
-{% if host in slave_hosts %}
-{{host}}={{all_racks[loop.index-1]}}
-{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
-{% endif %}
-{% endfor %}
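
For context on what these deleted templates did: exclude_hosts_list.j2 and include_hosts_list.j2 above are plain Jinja2 loops that emit one host per line, and topology_mappings.data.j2 is the only one with extra logic, pairing each slave host with its rack and IPv4 address through the 1-based loop.index. A standalone sketch of that rendering, using the jinja2 package directly (the hosts, racks and IPs below are invented for illustration; in Ambari these variables come from the hook's params):

    from jinja2 import Template

    TOPOLOGY = (
        "[network_topology]\n"
        "{% for host in all_hosts %}"
        "{% if host in slave_hosts %}"
        "{{host}}={{all_racks[loop.index-1]}}\n"
        "{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}\n"
        "{% endif %}"
        "{% endfor %}"
    )

    # loop.index is 1-based and counts iterations over all_hosts, so the
    # parallel lists all_racks and all_ipv4_ips must line up with all_hosts.
    print(Template(TOPOLOGY).render(
        all_hosts=["nn1.example.com", "dn1.example.com"],
        slave_hosts=["dn1.example.com"],
        all_racks=["/default-rack", "/rack-1"],
        all_ipv4_ips=["10.0.0.1", "10.0.0.2"],
    ))
    # prints:
    # [network_topology]
    # dn1.example.com=/rack-1
    # 10.0.0.2=/rack-1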

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 2b88bf0..4baca5c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1265,25 +1265,6 @@ public class AmbariMetaInfoTest {
     }
   }
 
-
-  @Test
-  public void testHooksDirInheritance() throws Exception {
-    String hookAssertionTemplate = "HDP/%s/hooks";
-    if (System.getProperty("os.name").contains("Windows")) {
-      hookAssertionTemplate = "HDP\\%s\\hooks";
-    }
-    // Test hook dir determination in parent
-    StackInfo stackInfo = metaInfo.getStack(STACK_NAME_HDP, "2.0.6");
-    Assert.assertEquals(String.format(hookAssertionTemplate, "2.0.6"), stackInfo.getStackHooksFolder());
-    // Test hook dir inheritance
-    stackInfo = metaInfo.getStack(STACK_NAME_HDP, "2.0.7");
-    Assert.assertEquals(String.format(hookAssertionTemplate, "2.0.6"), stackInfo.getStackHooksFolder());
-    // Test hook dir override
-    stackInfo = metaInfo.getStack(STACK_NAME_HDP, "2.0.8");
-    Assert.assertEquals(String.format(hookAssertionTemplate, "2.0.8"), stackInfo.getStackHooksFolder());
-  }
-
-
   @Test
   public void testServicePackageDirInheritance() throws Exception {
     String assertionTemplate07 = StringUtils.join(

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/test/python/TestResourceFilesKeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestResourceFilesKeeper.py b/ambari-server/src/test/python/TestResourceFilesKeeper.py
index 4f8bdd5..d5d1287 100644
--- a/ambari-server/src/test/python/TestResourceFilesKeeper.py
+++ b/ambari-server/src/test/python/TestResourceFilesKeeper.py
@@ -85,6 +85,7 @@ class TestResourceFilesKeeper(TestCase):
       "call('../resources/TestAmbaryServer.samples/" \
       "dummy_common_services/HIVE/0.11.0.2.0.5.0/package'),\n " \
       "call('../resources/TestAmbaryServer.samples/dummy_extension/HIVE/package'),\n " \
+      "call('../resources/stack-hooks'),\n " \
       "call('../resources/custom_actions'),\n " \
       "call('../resources/host_scripts'),\n " \
       "call('../resources/dashboards')]"

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
index 3d2d4d3..d792192 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
@@ -29,7 +29,7 @@ from resource_management.libraries.script import Script
 @patch("os.path.isfile", new = MagicMock(return_value=False))
 class TestHookAfterInstall(RMFTestCase):
   CONFIG_OVERRIDES = {"serviceName":"HIVE", "role":"HIVE_SERVER"}
-
+  STACK_VERSION = '2.0.6'
   def setUp(self):
     Logger.initialize_logger()
 
@@ -41,10 +41,12 @@ class TestHookAfterInstall(RMFTestCase):
 
   def test_hook_default(self):
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
                        config_file="default.json",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_overrides = self.CONFIG_OVERRIDES
     )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
@@ -82,9 +84,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['commandParams']['version'] = version
     json_content['hostLevelParams']['stack_version'] = "2.3"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict = json_content,
                        config_overrides = self.CONFIG_OVERRIDES)
 
@@ -156,9 +160,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['commandParams']['version'] = version
     json_content['hostLevelParams']['stack_version'] = "2.3"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict = json_content,
                        config_overrides = self.CONFIG_OVERRIDES)
 
@@ -235,9 +241,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['commandParams']['version'] = version
     json_content['hostLevelParams']['stack_version'] = "2.3"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
       classname="AfterInstallHook",
       command="hook",
+      stack_version = self.STACK_VERSION,
+      target=RMFTestCase.TARGET_STACK_HOOKS,
       config_dict = json_content,
       config_overrides = self.CONFIG_OVERRIDES)
 
@@ -265,9 +273,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['hostLevelParams']['stack_version'] = "2.3"
     json_content['roleParams']['upgrade_suspended'] = "true"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict = json_content,
                        config_overrides = self.CONFIG_OVERRIDES)
 
@@ -338,9 +348,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['hostLevelParams']['stack_version'] = "2.3"
     json_content['hostLevelParams']['host_sys_prepped'] = "true"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict = json_content,
                        config_overrides = self.CONFIG_OVERRIDES)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
index 73828e8..fd69f73 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
@@ -28,6 +28,7 @@ import os
 @patch.object(Hook, "run_custom_hook", new = MagicMock())
 class TestHookBeforeInstall(RMFTestCase):
   TMP_PATH = '/tmp/hbase-hbase'
+  STACK_VERSION = '2.0.6'
 
   @patch("os.path.isfile")
   @patch.object(getpass, "getuser", new = MagicMock(return_value='some_user'))
@@ -43,9 +44,11 @@ class TestHookBeforeInstall(RMFTestCase):
     os_path_exists_mock.side_effect = side_effect
     os_path_isfile_mock.side_effect = [False, True, True, True, True]
 
-    self.executeScript("2.0.6/hooks/before-ANY/scripts/hook.py",
+    self.executeScript("before-ANY/scripts/hook.py",
                        classname="BeforeAnyHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_file="default.json",
                        call_mocks=itertools.cycle([(0, "1000")])
     )

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
index 4ef4cc4..f55321f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
@@ -27,9 +27,13 @@ import json
 @patch.object(getpass, "getuser", new = MagicMock(return_value='some_user'))
 @patch.object(Hook, "run_custom_hook", new = MagicMock())
 class TestHookBeforeInstall(RMFTestCase):
+  STACK_VERSION = '2.0.6'
+
   def test_hook_default(self):
-    self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
+    self.executeScript("before-INSTALL/scripts/hook.py",
                        classname="BeforeInstallHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_file="default.json"
     )
@@ -63,9 +67,11 @@ class TestHookBeforeInstall(RMFTestCase):
 
     command_json['hostLevelParams']['repo_info'] = "[]"
 
-    self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
+    self.executeScript("before-INSTALL/scripts/hook.py",
                        classname="BeforeInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict=command_json)
 
     self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
@@ -75,9 +81,11 @@ class TestHookBeforeInstall(RMFTestCase):
 
 
   def test_hook_default_repository_file(self):
-    self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
+    self.executeScript("before-INSTALL/scripts/hook.py",
                        classname="BeforeInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_file="repository_file.json"
     )
     self.assertResourceCalled('Repository', 'HDP-2.2-repo-4',

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
index 510dc41..8e20d17 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
@@ -28,9 +28,12 @@ import json
 @patch("os.path.exists", new = MagicMock(return_value=True))
 @patch.object(Hook, "run_custom_hook", new = MagicMock())
 class TestHookBeforeStart(RMFTestCase):
+  STACK_VERSION = '2.0.6'
   def test_hook_default(self):
-    self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+    self.executeScript("before-START/scripts/hook.py",
                        classname="BeforeStartHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_file="default.json"
     )
@@ -104,8 +107,10 @@ class TestHookBeforeStart(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_hook_secured(self):
-    self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+    self.executeScript("before-START/scripts/hook.py",
                        classname="BeforeStartHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_file="secured.json"
     )
@@ -184,8 +189,10 @@ class TestHookBeforeStart(RMFTestCase):
       default_json = json.load(f)
 
     default_json['serviceName']= 'HDFS'
-    self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+    self.executeScript("before-START/scripts/hook.py",
                        classname="BeforeStartHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_dict=default_json
     )
@@ -266,8 +273,10 @@ class TestHookBeforeStart(RMFTestCase):
     default_json['serviceName'] = 'HDFS'
     default_json['configurations']['core-site']['net.topology.script.file.name'] = '/home/myhadoop/hadoop/conf.hadoop/topology_script.py'
 
-    self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+    self.executeScript("before-START/scripts/hook.py",
                        classname="BeforeStartHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_dict=default_json
     )
@@ -342,8 +351,10 @@ class TestHookBeforeStart(RMFTestCase):
 
   def test_that_jce_is_required_in_secured_cluster(self):
     try:
-      self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+      self.executeScript("before-START/scripts/hook.py",
                          classname="BeforeStartHook",
+                         stack_version = self.STACK_VERSION,
+                         target=RMFTestCase.TARGET_STACK_HOOKS,
                          command="hook",
                          config_file="secured_no_jce_name.json"
       )

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index 81ac262..d98e0b1 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -43,6 +43,7 @@ PATH_TO_STACKS = "main/resources/stacks/HDP"
 PATH_TO_STACK_TESTS = "test/python/stacks/"
 
 PATH_TO_COMMON_SERVICES = "main/resources/common-services"
+PATH_TO_STACK_HOOKS = "main/resources/stack-hooks"
 
 PATH_TO_CUSTOM_ACTIONS = "main/resources/custom_actions"
 PATH_TO_CUSTOM_ACTION_TESTS = "test/python/custom_actions"
@@ -62,6 +63,9 @@ class RMFTestCase(TestCase):
   # build all paths to test common services scripts
   TARGET_COMMON_SERVICES = 'TARGET_COMMON_SERVICES'
 
+  # build all paths to test stack hooks scripts
+  TARGET_STACK_HOOKS = 'TARGET_STACK_HOOKS'
+
   def executeScript(self, path, classname=None, command=None, config_file=None,
                     config_dict=None,
                     # common mocks for all the scripts
@@ -195,6 +199,10 @@ class RMFTestCase(TestCase):
       base_path = os.path.join(src_dir, PATH_TO_COMMON_SERVICES)
       configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, stack_version, "configs")
       return base_path, configs_path
+    elif target == self.TARGET_STACK_HOOKS:
+      base_path = os.path.join(src_dir, PATH_TO_STACK_HOOKS)
+      configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, stack_version, "configs")
+      return base_path, configs_path
     else:
       raise RuntimeError("Wrong target value %s", target)
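
Taken together, the test changes in this commit follow one pattern: hook scripts are no longer addressed by a stack-qualified path such as "2.0.6/hooks/before-START/scripts/hook.py", but by a hook-relative path plus an explicit stack_version and the new TARGET_STACK_HOOKS target, which executeScript resolves against main/resources/stack-hooks while still reading configs from the per-stack test tree. A minimal sketch of the resulting call shape, using only names introduced above (assumes the surrounding test class extends RMFTestCase and defines STACK_VERSION):

    self.executeScript("before-START/scripts/hook.py",
                       classname="BeforeStartHook",
                       command="hook",
                       stack_version=self.STACK_VERSION,       # e.g. '2.0.6'
                       target=RMFTestCase.TARGET_STACK_HOOKS,  # scripts from main/resources/stack-hooks
                       config_file="default.json")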
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/contrib/management-packs/hdf-ambari-mpack/src/main/assemblies/hdf-ambari-mpack.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/hdf-ambari-mpack/src/main/assemblies/hdf-ambari-mpack.xml b/contrib/management-packs/hdf-ambari-mpack/src/main/assemblies/hdf-ambari-mpack.xml
index 2df8075..033e95f 100644
--- a/contrib/management-packs/hdf-ambari-mpack/src/main/assemblies/hdf-ambari-mpack.xml
+++ b/contrib/management-packs/hdf-ambari-mpack/src/main/assemblies/hdf-ambari-mpack.xml
@@ -40,6 +40,7 @@
   -->
   <fileSets>
     <fileSet>
+      <!--TODO-->
       <directory>src/main/resources/hooks</directory>
       <outputDirectory>hooks</outputDirectory>
     </fileSet>


[39/50] [abbrv] ambari git commit: AMBARI-22157. Web Client Should Never List Any Upgrade Actions On Only Stack In System (alexantonenko)

Posted by jl...@apache.org.
AMBARI-22157. Web Client Should Never List Any Upgrade Actions On Only Stack In System (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fab2aa3c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fab2aa3c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fab2aa3c

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: fab2aa3c62e3dbe5cce795ca50c4b61baee1d19e
Parents: 7172655
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Fri Oct 6 16:35:29 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Fri Oct 6 16:35:29 2017 +0300

----------------------------------------------------------------------
 .../stack_upgrade/upgrade_version_box_view.js   | 13 ++++--
 .../upgrade_version_box_view_test.js            | 44 ++++++++++++++++++--
 2 files changed, 49 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fab2aa3c/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
index 28f4f32..355ad88 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
@@ -199,7 +199,8 @@ App.UpgradeVersionBoxView = Em.View.extend({
     'isUpgrading',
     'controller.requestInProgress',
     'controller.requestInProgressRepoId',
-    'parentView.repoVersions.@each.status'
+    'parentView.repoVersions.@each.status',
+    'isCurrentStackPresent'
   ),
 
   /**
@@ -208,6 +209,7 @@ App.UpgradeVersionBoxView = Em.View.extend({
    */
   isDisabledOnInit: function() {
     return  this.get('controller.requestInProgress') ||
+            !this.get('isCurrentStackPresent') ||
             !this.get('content.isCompatible') ||
             (App.get('upgradeIsRunning') && !App.get('upgradeSuspended')) ||
             this.get('parentView.repoVersions').someProperty('status', 'INSTALLING');
@@ -312,8 +314,6 @@ App.UpgradeVersionBoxView = Em.View.extend({
             });
           }
 
-
-
       }
       element.set('isDisabled', isDisabled);
     }
@@ -357,7 +357,8 @@ App.UpgradeVersionBoxView = Em.View.extend({
    * @returns {boolean}
    */
   isDisabledOnInstalled: function() {
-    return !App.isAuthorized('CLUSTER.UPGRADE_DOWNGRADE_STACK') ||
+    return !this.get('isCurrentStackPresent') ||
+      !App.isAuthorized('CLUSTER.UPGRADE_DOWNGRADE_STACK') ||
       this.get('controller.requestInProgress') ||
       this.get('parentView.repoVersions').someProperty('status', 'INSTALLING') ||
       (this.get('controller.isDowngrade') &&
@@ -392,6 +393,10 @@ App.UpgradeVersionBoxView = Em.View.extend({
     $('.out-of-sync-badge').tooltip('destroy');
   },
 
+  isCurrentStackPresent: Ember.computed('parentView.repoVersions.@each.stackVersion.state', function () {
+    return this.get('parentView.repoVersions').someProperty('stackVersion.state', 'CURRENT');
+  }),
+
   /**
    * run custom action of controller
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/fab2aa3c/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
index 506fb81..9bee7af 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
@@ -29,6 +29,7 @@ describe('App.UpgradeVersionBoxView', function () {
     sinon.stub(App.db, 'getFilterConditions', function () {return [];});
     view = App.UpgradeVersionBoxView.create({
       initFilters: Em.K,
+      isCurrentStackPresent: true,
       controller: Em.Object.create({
         upgrade: Em.K,
         getRepoVersionInstallId: Em.K,
@@ -1103,6 +1104,7 @@ describe('App.UpgradeVersionBoxView', function () {
         upgradeSuspended: true,
         status: 'INSTALLED',
         isCompatible: true,
+        isCurrentStackPresent: true,
         expected: true
       },
       {
@@ -1111,6 +1113,7 @@ describe('App.UpgradeVersionBoxView', function () {
         upgradeSuspended: false,
         status: 'INSTALLED',
         isCompatible: true,
+        isCurrentStackPresent: true,
         expected: true
       },
       {
@@ -1119,6 +1122,7 @@ describe('App.UpgradeVersionBoxView', function () {
         upgradeSuspended: false,
         status: 'INSTALLING',
         isCompatible: true,
+        isCurrentStackPresent: true,
         expected: true
       },
       {
@@ -1127,6 +1131,7 @@ describe('App.UpgradeVersionBoxView', function () {
         upgradeSuspended: true,
         status: 'INSTALLED',
         isCompatible: false,
+        isCurrentStackPresent: true,
         expected: true
       },
       {
@@ -1135,6 +1140,7 @@ describe('App.UpgradeVersionBoxView', function () {
         upgradeSuspended: true,
         status: 'INSTALLED',
         isCompatible: true,
+        isCurrentStackPresent: true,
         expected: false
       },
       {
@@ -1143,7 +1149,17 @@ describe('App.UpgradeVersionBoxView', function () {
         upgradeSuspended: false,
         status: 'INSTALLED',
         isCompatible: true,
+        isCurrentStackPresent: true,
         expected: false
+      },
+      {
+        requestInProgress: false,
+        upgradeIsRunning: false,
+        upgradeSuspended: false,
+        status: 'INSTALLED',
+        isCompatible: true,
+        isCurrentStackPresent: false,
+        expected: true
       }
     ];
 
@@ -1156,16 +1172,18 @@ describe('App.UpgradeVersionBoxView', function () {
     });
 
     testCases.forEach(function(test) {
-      it("requestInProgress: " + test.requestInProgress +
+      it(" requestInProgress: " + test.requestInProgress +
          " upgradeIsRunning: " + test.upgradeIsRunning +
          " upgradeSuspended: " + test.upgradeSuspended +
-         " status" + test.status +
-         " isCompatible" + test.isCompatible, function() {
+         " status: " + test.status +
+         " isCompatible: " + test.isCompatible +
+         " isCurrentStackPresent: " + test.isCurrentStackPresent, function() {
         this.mock.withArgs('upgradeSuspended').returns(test.upgradeSuspended);
         this.mock.withArgs('upgradeIsRunning').returns(test.upgradeIsRunning);
         view.set('parentView.repoVersions', [Em.Object.create({
           status: test.status
         })]);
+        view.set('isCurrentStackPresent', test.isCurrentStackPresent);
         view.set('controller.requestInProgress', test.requestInProgress);
         view.set('content.isCompatible', test.isCompatible);
         expect(view.isDisabledOnInit()).to.be.equal(test.expected);
@@ -1191,6 +1209,7 @@ describe('App.UpgradeVersionBoxView', function () {
         isDowngrade: false,
         repositoryName: 'HDP-2.2',
         upgradeVersion: 'HDP-2.3',
+        isCurrentStackPresent: true,
         expected: true
       },
       {
@@ -1200,6 +1219,7 @@ describe('App.UpgradeVersionBoxView', function () {
         isDowngrade: false,
         repositoryName: 'HDP-2.2',
         upgradeVersion: 'HDP-2.3',
+        isCurrentStackPresent: true,
         expected: true
       },
       {
@@ -1209,6 +1229,7 @@ describe('App.UpgradeVersionBoxView', function () {
         isDowngrade: false,
         repositoryName: 'HDP-2.2',
         upgradeVersion: 'HDP-2.3',
+        isCurrentStackPresent: true,
         expected: true
       },
       {
@@ -1218,6 +1239,7 @@ describe('App.UpgradeVersionBoxView', function () {
         isDowngrade: true,
         repositoryName: 'HDP-2.2',
         upgradeVersion: 'HDP-2.2',
+        isCurrentStackPresent: true,
         expected: true
       },
       {
@@ -1227,6 +1249,7 @@ describe('App.UpgradeVersionBoxView', function () {
         isDowngrade: true,
         repositoryName: 'HDP-2.2',
         upgradeVersion: 'HDP-2.3',
+        isCurrentStackPresent: true,
         expected: false
       },
       {
@@ -1236,7 +1259,18 @@ describe('App.UpgradeVersionBoxView', function () {
         isDowngrade: false,
         repositoryName: 'HDP-2.2',
         upgradeVersion: 'HDP-2.2',
+        isCurrentStackPresent: true,
         expected: false
+      },
+      {
+        isAuthorized: true,
+        requestInProgress: false,
+        status: 'INSTALLED',
+        isDowngrade: false,
+        repositoryName: 'HDP-2.2',
+        upgradeVersion: 'HDP-2.2',
+        isCurrentStackPresent: false,
+        expected: true
       }
     ];
 
@@ -1246,13 +1280,15 @@ describe('App.UpgradeVersionBoxView', function () {
           "status: " + test.status +
           "isDowngrade: " + test.isDowngrade +
           "repositoryName: " + test.repositoryName +
-          "upgradeVersion: " + test.upgradeVersion, function() {
+          "upgradeVersion: " + test.upgradeVersion +
+          "isCurrentStackPresent: " + test.isCurrentStackPresent, function() {
         this.authorizedMock.returns(test.isAuthorized);
         view.set('controller.requestInProgress', test.requestInProgress);
         view.set('parentView.repoVersions', [Em.Object.create({status: test.status})]);
         view.set('controller.isDowngrade', test.isDowngrade);
         view.set('controller.currentVersion.repository_name', test.repositoryName);
         view.set('controller.upgradeVersion', test.upgradeVersion);
+        view.set('isCurrentStackPresent', test.isCurrentStackPresent);
         expect(view.isDisabledOnInstalled()).to.be.equal(test.expected);
       });
     });


[20/50] [abbrv] ambari git commit: AMBARI-22132 Reconfigure spacing of widgets view. (atkach)

Posted by jl...@apache.org.
AMBARI-22132 Reconfigure spacing of widgets view. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3e6aa878
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3e6aa878
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3e6aa878

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 3e6aa87887932dbfe71dc24a1255320f1ab7bbd8
Parents: 8e7654a
Author: Andrii Tkach <at...@apache.org>
Authored: Wed Oct 4 19:24:58 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Wed Oct 4 19:27:17 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/styles/alerts.less               |  8 +++----
 ambari-web/app/styles/dashboard.less            | 22 ++++++++++++++++----
 .../app/templates/main/service/info/summary.hbs |  5 ++---
 .../service/info/summary/master_components.hbs  |  4 ----
 4 files changed, 24 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3e6aa878/ambari-web/app/styles/alerts.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/alerts.less b/ambari-web/app/styles/alerts.less
index 775c0ef..a1d3468 100644
--- a/ambari-web/app/styles/alerts.less
+++ b/ambari-web/app/styles/alerts.less
@@ -397,16 +397,16 @@
 
 .summary-value {
   .alerts-crit-count, .alerts-warn-count, .no-alerts-label {
-    padding: 2px 3px 2px 3px;
+    padding: 3px 0;
     font-size: 9px;
     border-radius: 50%;
-    width: 13px;
-    height: 13px;
+    width: 15px;
+    height: 15px;
     color: #ffffff;
     text-align: center;
     display: inline-block;
     position: relative;
-    top: -2px;
+    top: -3px;
   }
 }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e6aa878/ambari-web/app/styles/dashboard.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/dashboard.less b/ambari-web/app/styles/dashboard.less
index f413caa..02835bd 100644
--- a/ambari-web/app/styles/dashboard.less
+++ b/ambari-web/app/styles/dashboard.less
@@ -30,7 +30,7 @@
   .dashboard-widgets-box {
     clear: both;
     display: inline-block;
-    padding: 10px;
+    padding: 10px 1.1% 10px 1.1%;
     background-color: #ffffff;
   }
   #widgets-options-menu {
@@ -351,11 +351,14 @@
 
   #dashboard-widgets-container {
 
+    .dashboard-widgets-box {
+      padding: 10px 1.3% 10px 1.3%;
+    }
+
     #dashboard-widgets {
       .span2p4 {
-        float: left;
-        width: 22.3%;
-        *width: 22.3%;
+        width: 23.2%;
+        *width: 23.2%;
       }
       .img-thumbnail {
         .caption {
@@ -373,4 +376,15 @@
     }
   }
 
+}
+
+@media (min-width: 1500px) {
+  #dashboard-widgets-container {
+    #dashboard-widgets {
+      .span2p4 {
+        width: 23.4%;
+        *width: 23.4%;
+      }
+    }
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e6aa878/ambari-web/app/templates/main/service/info/summary.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary.hbs b/ambari-web/app/templates/main/service/info/summary.hbs
index 789a6fc..b1b0a42 100644
--- a/ambari-web/app/templates/main/service/info/summary.hbs
+++ b/ambari-web/app/templates/main/service/info/summary.hbs
@@ -60,9 +60,8 @@
                 <i class="glyphicon glyphicon-bell"></i>
                 {{#if view.alertsCount}}
                   <span {{bindAttr class=":label view.hasCriticalAlerts:alerts-crit-count:alerts-warn-count"}}>
-                    {{view.alertsCount}}</span>
-                {{else}}
-                  <span class="label no-alerts-label">{{t services.service.summary.alerts.noAlerts}}</span>
+                    {{view.alertsCount}}
+                  </span>
                 {{/if}}
               </span>
             {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3e6aa878/ambari-web/app/templates/main/service/info/summary/master_components.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary/master_components.hbs b/ambari-web/app/templates/main/service/info/summary/master_components.hbs
index 8126d1f..8925d87 100644
--- a/ambari-web/app/templates/main/service/info/summary/master_components.hbs
+++ b/ambari-web/app/templates/main/service/info/summary/master_components.hbs
@@ -25,10 +25,6 @@
         <span {{action "showServiceAlertsPopup" comp target="controller"}} {{bindAttr class=":label comp.hasCriticalAlerts:alerts-crit-count:alerts-warn-count"}}>
           {{comp.alertsCount}}
         </span>
-      {{else}}
-        <span {{action "showServiceAlertsPopup" comp target="controller"}} class="label no-alerts-label">
-          {{t services.service.summary.alerts.noAlerts}}
-        </span>
       {{/if}}
     </div>
     <div {{bindAttr class=":summary-label comp.summaryLabelClassName"}}>


[18/50] [abbrv] ambari git commit: AMBARI-22129 Log Search UI: reorganize classes structure. (ababiichuk)

Posted by jl...@apache.org.
AMBARI-22129 Log Search UI: reorganize classes structure. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b4966c10
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b4966c10
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b4966c10

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b4966c10b3369264c07a1daa934a4f872379cdb7
Parents: de981ca
Author: ababiichuk <ab...@hortonworks.com>
Authored: Wed Oct 4 15:41:26 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Wed Oct 4 17:09:13 2017 +0300

----------------------------------------------------------------------
 .../classes/active-service-log-entry.class.ts   |  23 --
 .../src/app/classes/active-service-log-entry.ts |  23 ++
 .../src/app/classes/histogram-options.class.ts  |  36 ---
 .../src/app/classes/histogram-options.ts        |  36 +++
 .../src/app/classes/list-item.class.ts          |  26 ---
 .../src/app/classes/list-item.ts                |  26 +++
 .../src/app/classes/models/app-settings.ts      |  27 +++
 .../src/app/classes/models/app-state.ts         |  43 ++++
 .../src/app/classes/models/audit-log-field.ts   | 225 +++++++++++++++++++
 .../src/app/classes/models/audit-log.ts         |  46 ++++
 .../src/app/classes/models/bar-graph.ts         |  24 ++
 .../src/app/classes/models/common-entry.ts      |  22 ++
 .../src/app/classes/models/count.ts             |  22 ++
 .../src/app/classes/models/filter.ts            |  25 +++
 .../src/app/classes/models/graph.ts             |  23 ++
 .../src/app/classes/models/log-field.ts         |  27 +++
 .../src/app/classes/models/log.ts               |  38 ++++
 .../src/app/classes/models/node.ts              |  30 +++
 .../src/app/classes/models/service-log-field.ts | 107 +++++++++
 .../src/app/classes/models/service-log.ts       |  27 +++
 .../app/classes/models/solr-collection-state.ts |  23 ++
 .../src/app/classes/models/store.ts             | 180 +++++++++++++++
 .../src/app/classes/models/user-config.ts       |  26 +++
 .../queries/audit-logs-query-params.class.ts    |  46 ----
 .../classes/queries/audit-logs-query-params.ts  |  46 ++++
 .../app/classes/queries/query-params.class.ts   |  23 --
 .../src/app/classes/queries/query-params.ts     |  23 ++
 ...ce-logs-histogram-query-params.class.spec.ts | 203 -----------------
 ...service-logs-histogram-query-params.class.ts |  70 ------
 .../service-logs-histogram-query-params.spec.ts | 203 +++++++++++++++++
 .../service-logs-histogram-query-params.ts      |  70 ++++++
 .../queries/service-logs-query-params.class.ts  |  30 ---
 .../queries/service-logs-query-params.ts        |  30 +++
 ...service-logs-truncated-query-params.class.ts |  36 ---
 .../service-logs-truncated-query-params.ts      |  36 +++
 .../classes/service-log-context-entry.class.ts  |  26 ---
 .../app/classes/service-log-context-entry.ts    |  26 +++
 .../dropdown-button.component.ts                |   2 +-
 .../dropdown-list/dropdown-list.component.ts    |   2 +-
 .../filter-button/filter-button.component.ts    |   2 +-
 .../filters-panel/filters-panel.component.ts    |   4 +-
 .../log-context/log-context.component.ts        |   4 +-
 .../logs-container/logs-container.component.ts  |  10 +-
 .../components/logs-list/logs-list.component.ts |   4 +-
 .../main-container/main-container.component.ts  |   6 +-
 .../menu-button/menu-button.component.ts        |   2 +-
 .../search-box/search-box.component.ts          |   2 +-
 .../time-histogram/time-histogram.component.ts  |   2 +-
 .../src/app/models/app-settings.model.ts        |  27 ---
 .../src/app/models/app-state.model.ts           |  43 ----
 .../src/app/models/audit-log-field.model.ts     | 225 -------------------
 .../src/app/models/audit-log.model.ts           |  46 ----
 .../src/app/models/bar-graph.model.ts           |  24 --
 .../src/app/models/common-entry.model.ts        |  22 --
 .../src/app/models/count.model.ts               |  22 --
 .../src/app/models/filter.model.ts              |  25 ---
 .../src/app/models/graph.model.ts               |  23 --
 .../src/app/models/log-field.model.ts           |  27 ---
 .../src/app/models/log.model.ts                 |  38 ----
 .../src/app/models/node.model.ts                |  30 ---
 .../src/app/models/service-log-field.model.ts   | 107 ---------
 .../src/app/models/service-log.model.ts         |  27 ---
 .../app/models/solr-collection-state.model.ts   |  23 --
 .../src/app/models/store.model.ts               | 180 ---------------
 .../src/app/models/user-config.model.ts         |  26 ---
 .../app/services/component-actions.service.ts   |   4 +-
 .../src/app/services/filtering.service.spec.ts  |   4 +-
 .../src/app/services/filtering.service.ts       |   4 +-
 .../src/app/services/http-client.service.ts     |   8 +-
 .../src/app/services/logs-container.service.ts  |   2 +-
 .../services/storage/app-settings.service.ts    |   4 +-
 .../app/services/storage/app-state.service.ts   |   4 +-
 .../storage/audit-logs-fields.service.ts        |   2 +-
 .../app/services/storage/audit-logs.service.ts  |   2 +-
 .../app/services/storage/clusters.service.ts    |   2 +-
 .../app/services/storage/components.service.ts  |   2 +-
 .../src/app/services/storage/filters.service.ts |   2 +-
 .../src/app/services/storage/graphs.service.ts  |   2 +-
 .../src/app/services/storage/hosts.service.ts   |   2 +-
 .../storage/service-logs-fields.service.ts      |   2 +-
 .../service-logs-histogram-data.service.ts      |   2 +-
 .../storage/service-logs-truncated.service.ts   |   2 +-
 .../services/storage/service-logs.service.ts    |   2 +-
 .../services/storage/user-configs.service.ts    |   2 +-
 84 files changed, 1481 insertions(+), 1481 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/active-service-log-entry.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/active-service-log-entry.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/active-service-log-entry.class.ts
deleted file mode 100644
index d3d7d95..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/active-service-log-entry.class.ts
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface ActiveServiceLogEntry {
-  id: string;
-  host_name: string;
-  component_name: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/active-service-log-entry.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/active-service-log-entry.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/active-service-log-entry.ts
new file mode 100644
index 0000000..d3d7d95
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/active-service-log-entry.ts
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface ActiveServiceLogEntry {
+  id: string;
+  host_name: string;
+  component_name: string;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.class.ts
deleted file mode 100644
index dee5d98..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.class.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface HistogramMarginOptions {
-  top: number;
-  right: number;
-  bottom: number;
-  left: number;
-}
-
-export interface HistogramStyleOptions {
-  margin?: HistogramMarginOptions;
-  height?: number;
-  tickPadding?: number;
-  columnWidth?: number;
-  dragAreaColor?: string;
-}
-
-export interface HistogramOptions extends HistogramStyleOptions {
-  keysWithColors: {[key: string]: string};
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.ts
new file mode 100644
index 0000000..dee5d98
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/histogram-options.ts
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface HistogramMarginOptions {
+  top: number;
+  right: number;
+  bottom: number;
+  left: number;
+}
+
+export interface HistogramStyleOptions {
+  margin?: HistogramMarginOptions;
+  height?: number;
+  tickPadding?: number;
+  columnWidth?: number;
+  dragAreaColor?: string;
+}
+
+export interface HistogramOptions extends HistogramStyleOptions {
+  keysWithColors: {[key: string]: string};
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/list-item.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/list-item.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/list-item.class.ts
deleted file mode 100644
index 1aaaecc..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/list-item.class.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface ListItem {
-  id?: string;
-  label?: string;
-  value: any;
-  iconClass?: string;
-  isChecked?: boolean;
-  action?: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/list-item.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/list-item.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/list-item.ts
new file mode 100644
index 0000000..1aaaecc
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/list-item.ts
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface ListItem {
+  id?: string;
+  label?: string;
+  value: any;
+  iconClass?: string;
+  isChecked?: boolean;
+  action?: string;
+}
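
As a usage sketch (not part of the commit), a dropdown entry conforming to the renamed ListItem interface; all values are hypothetical:

    import {ListItem} from '@app/classes/list-item';

    const errorLevelItem: ListItem = {
      id: 'level-error',
      label: 'ERROR',
      value: 'ERROR',
      iconClass: 'fa fa-exclamation-circle',
      isChecked: false
    };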

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/app-settings.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/app-settings.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/app-settings.ts
new file mode 100644
index 0000000..11821a3
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/app-settings.ts
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import * as moment from 'moment-timezone';
+
+export interface AppSettings {
+  timeZone: string;
+}
+
+export const defaultSettings: AppSettings = {
+  timeZone: moment.tz.guess()
+};
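
A quick sketch (assumed, not part of the commit) of how the guessed zone feeds into moment-timezone formatting:

    import * as moment from 'moment-timezone';
    import {defaultSettings} from '@app/classes/models/app-settings';

    // Render an epoch timestamp in the user's guessed time zone
    const label = moment(1507000000000).tz(defaultSettings.timeZone).format('YYYY-MM-DD HH:mm:ss');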

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/app-state.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/app-state.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/app-state.ts
new file mode 100644
index 0000000..beeb670
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/app-state.ts
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry';
+
+export interface AppState {
+  isAuthorized: boolean;
+  isInitialLoading: boolean;
+  isLoginInProgress: boolean;
+  isAuditLogsSet: boolean;
+  isServiceLogsSet: boolean;
+  activeLogsType?: string;
+  isServiceLogsFileView: boolean;
+  isServiceLogContextView: boolean;
+  activeLog: ActiveServiceLogEntry | null;
+}
+
+export const initialState: AppState = {
+  isAuthorized: false,
+  isInitialLoading: false,
+  isLoginInProgress: false,
+  isAuditLogsSet: false,
+  isServiceLogsSet: false,
+  activeLogsType: 'serviceLogs', // TODO implement setting the parameter depending on user's navigation
+  isServiceLogsFileView: false,
+  isServiceLogContextView: false,
+  activeLog: null
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/audit-log-field.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/audit-log-field.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/audit-log-field.ts
new file mode 100644
index 0000000..40ad27e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/audit-log-field.ts
@@ -0,0 +1,225 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {LogField} from '@app/classes/models/log-field';
+
+const columnsNamesMap = {
+  access: {
+    displayName: 'logs.accessType',
+    isDisplayed: true
+  },
+  action: {
+    displayName: 'logs.action'
+  },
+  agent: {
+    displayName: 'logs.agent'
+  },
+  agentHost: {
+    displayName: 'logs.agentHost'
+  },
+  authType: {
+    displayName: 'logs.authType'
+  },
+  bundle_id: {
+    displayName: 'logs.bundleId'
+  },
+  case_id: {
+    displayName: 'logs.caseId'
+  },
+  cliIP: {
+    displayName: 'logs.clientIp',
+    isDisplayed: true
+  },
+  cliType: {
+    displayName: 'logs.clientType'
+  },
+  cluster: {
+    displayName: 'logs.cluster'
+  },
+  dst: {
+    displayName: 'logs.dst'
+  },
+  evtTime: {
+    displayName: 'logs.eventTime',
+    isDisplayed: true
+  },
+  file: {
+    displayName: 'logs.file'
+  },
+  host: {
+    displayName: 'logs.host'
+  },
+  id: {
+    displayName: 'logs.id'
+  },
+  ip: {
+    displayName: 'logs.ip'
+  },
+  level: {
+    displayName: 'logs.level'
+  },
+  log_message: {
+    displayName: 'logs.message'
+  },
+  logType: {
+    displayName: 'logs.logType'
+  },
+  logfile_line_number: {
+    displayName: 'logs.logfileLineNumber'
+  },
+  logger_name: {
+    displayName: 'logs.loggerName'
+  },
+  logtime: {
+    displayName: 'logs.logTime'
+  },
+  path: {
+    displayName: 'logs.path'
+  },
+  perm: {
+    displayName: 'logs.perm'
+  },
+  policy: {
+    displayName: 'logs.policy'
+  },
+  proxyUsers: {
+    displayName: 'logs.proxyUsers'
+  },
+  reason: {
+    displayName: 'logs.reason'
+  },
+  repo: {
+    displayName: 'logs.repo',
+    isDisplayed: true
+  },
+  repoType: {
+    displayName: 'logs.repoType'
+  },
+  req_caller_id: {
+    displayName: 'logs.reqCallerId'
+  },
+  reqContext: {
+    displayName: 'logs.reqContext'
+  },
+  reqData: {
+    displayName: 'logs.reqData'
+  },
+  req_self_id: {
+    displayName: 'logs.reqSelfId'
+  },
+  resType: {
+    displayName: 'logs.resType'
+  },
+  resource: {
+    displayName: 'logs.resource',
+    isDisplayed: true
+  },
+  result: {
+    displayName: 'logs.result',
+    isDisplayed: true
+  },
+  sess: {
+    displayName: 'logs.session'
+  },
+  text: {
+    displayName: 'logs.text'
+  },
+  type: {
+    displayName: 'logs.type'
+  },
+  ugi: {
+    displayName: 'logs.ugi'
+  },
+  reqUser: {
+    displayName: 'logs.user',
+    isDisplayed: true
+  },
+  ws_base_url: {
+    displayName: 'logs.baseUrl'
+  },
+  ws_command: {
+    displayName: 'logs.command'
+  },
+  ws_component: {
+    displayName: 'logs.component'
+  },
+  ws_details: {
+    displayName: 'logs.details'
+  },
+  ws_display_name: {
+    displayName: 'logs.displayName'
+  },
+  ws_os: {
+    displayName: 'logs.os'
+  },
+  ws_repo_id: {
+    displayName: 'logs.repoId'
+  },
+  ws_repo_version: {
+    displayName: 'logs.repoVersion'
+  },
+  ws_repositories: {
+    displayName: 'logs.repositories'
+  },
+  ws_request_id: {
+    displayName: 'logs.requestId'
+  },
+  ws_result_status: {
+    displayName: 'logs.resultStatus'
+  },
+  ws_roles: {
+    displayName: 'logs.roles'
+  },
+  ws_stack_version: {
+    displayName: 'logs.stackVersion'
+  },
+  ws_stack: {
+    displayName: 'logs.stack'
+  },
+  ws_status: {
+    displayName: 'logs.status'
+  },
+  ws_task_id: {
+    displayName: 'logs.taskId'
+  },
+  ws_version_note: {
+    displayName: 'logs.versionNote'
+  },
+  ws_version_number: {
+    displayName: 'logs.versionNumber'
+  },
+  tags: {
+    isAvailable: false
+  },
+  tags_str: {
+    isAvailable: false
+  },
+  seq_num: {
+    isAvailable: false
+  }
+};
+
+export class AuditLogField extends LogField {
+  constructor(name: string) {
+    super(name);
+    const preset = columnsNamesMap[this.name];
+    if (preset) {
+      Object.assign(this, preset);
+    }
+  }
+}
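
A minimal sketch of how the preset map above resolves (not part of the commit): mapped names pick up their display metadata, while unmapped names keep the LogField defaults; 'some_custom_field' is a hypothetical name:

    import {AuditLogField} from '@app/classes/models/audit-log-field';

    const mapped = new AuditLogField('cliIP');
    // mapped.displayName === 'logs.clientIp', mapped.isDisplayed === true

    const unmapped = new AuditLogField('some_custom_field');
    // unmapped.displayName === 'some_custom_field', unmapped.isDisplayed === false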

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/audit-log.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/audit-log.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/audit-log.ts
new file mode 100644
index 0000000..fbe0e46
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/audit-log.ts
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {Log} from '@app/classes/models/log';
+
+export interface AuditLog extends Log {
+  policy?: string;
+  reason?: string;
+  result: number;
+  text?: string;
+  tags?: string[];
+  resource?: string;
+  sess?: string;
+  access?: string;
+  logType: string;
+  tags_str?: string;
+  resType?: string;
+  reqUser: string;
+  reqData?: string;
+  repoType: number;
+  repo: string;
+  proxyUsers?: string[];
+  evtTime: string;
+  enforcer: string;
+  reqContext?: string;
+  cliType?: string;
+  cliIP?: string;
+  agent?: string;
+  agentHost?: string;
+  action?: string;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/bar-graph.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/bar-graph.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/bar-graph.ts
new file mode 100644
index 0000000..d872bd0
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/bar-graph.ts
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {CommonEntry} from '@app/classes/models/common-entry';
+
+export interface BarGraph {
+  dataCount: CommonEntry[];
+  name: string;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/common-entry.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/common-entry.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/common-entry.ts
new file mode 100644
index 0000000..dad82ab
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/common-entry.ts
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface CommonEntry {
+  name: string;
+  value: string;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/count.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/count.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/count.ts
new file mode 100644
index 0000000..02fc41c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/count.ts
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface Count {
+  name: string;
+  count: number;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/filter.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/filter.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/filter.ts
new file mode 100644
index 0000000..c7ff662
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/filter.ts
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface Filter {
+  label: string;
+  hosts: string[];
+  defaultLevels: string[];
+  overrideLevels: string[];
+  expiryTime: string;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/graph.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/graph.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/graph.ts
new file mode 100644
index 0000000..be31f19
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/graph.ts
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface Graph {
+  name: string;
+  count: string;
+  dataList?: Graph[];
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/log-field.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/log-field.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/log-field.ts
new file mode 100644
index 0000000..0e738ab
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/log-field.ts
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export class LogField {
+  constructor(name: string) {
+    this.name = this.displayName = name; // assign both here: a field initializer reading this.name would run before the constructor body
+  }
+  name: string;
+  displayName: string;
+  isDisplayed: boolean = false;
+  isAvailable: boolean = true;
+}
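
For reference (a sketch under the same assumptions as the code above), a bare LogField yields the defaults that the field subclasses override:

    import {LogField} from '@app/classes/models/log-field';

    const field = new LogField('host');
    // field.displayName === 'host', field.isDisplayed === false, field.isAvailable === true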

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/log.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/log.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/log.ts
new file mode 100644
index 0000000..c598e41
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/log.ts
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface Log {
+  type: string;
+  _version_: number;
+  id: string;
+  file?: string;
+  seq_num: number;
+  bundle_id?: string;
+  case_id?: string;
+  log_message: string;
+  logfile_line_number: number;
+  line_number?: number;
+  message_md5: string;
+  cluster: string;
+  event_count: number;
+  event_md5: string;
+  event_dur_ms: number;
+  _ttl_: string;
+  _expire_at_: number;
+  _router_field_?: number;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/node.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/node.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/node.ts
new file mode 100644
index 0000000..a14e51a
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/node.ts
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {CommonEntry} from '@app/classes/models/common-entry';
+
+export interface Node {
+  name: string;
+  type?: string;
+  value: string;
+  isParent: boolean;
+  isRoot: boolean;
+  childs?: Node[];
+  logLevelCount?: CommonEntry[];
+  vNodeList?: CommonEntry[];
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/service-log-field.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/service-log-field.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/service-log-field.ts
new file mode 100644
index 0000000..0c1c23e
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/service-log-field.ts
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {LogField} from '@app/classes/models/log-field';
+
+const columnsNamesMap = {
+  log_message: {
+    displayName: 'logs.message',
+    isDisplayed: true
+  },
+  bundle_id: {
+    displayName: 'logs.bundleId'
+  },
+  case_id: {
+    displayName: 'logs.caseId'
+  },
+  cluster: {
+    displayName: 'logs.cluster'
+  },
+  event_count: {
+    displayName: 'logs.eventCount'
+  },
+  file: {
+    displayName: 'logs.file'
+  },
+  host: {
+    displayName: 'logs.host'
+  },
+  id: {
+    displayName: 'logs.id'
+  },
+  ip: {
+    displayName: 'logs.ip'
+  },
+  level: {
+    displayName: 'logs.level',
+    isDisplayed: true
+  },
+  line_number: {
+    displayName: 'logs.lineNumber'
+  },
+  logtype: {
+    displayName: 'logs.logType'
+  },
+  logfile_line_number: {
+    displayName: 'logs.logfileLineNumber'
+  },
+  logger_name: {
+    displayName: 'logs.loggerName'
+  },
+  logtime: {
+    isDisplayed: true
+  },
+  method: {
+    displayName: 'logs.method'
+  },
+  path: {
+    displayName: 'logs.path'
+  },
+  rowtype: {
+    displayName: 'logs.rowType'
+  },
+  thread_name: {
+    displayName: 'logs.threadName'
+  },
+  type: {
+    displayName: 'logs.type',
+    isDisplayed: true
+  },
+  tags: {
+    isAvailable: false
+  },
+  text: {
+    isAvailable: false
+  },
+  message: {
+    isAvailable: false
+  },
+  seq_num: {
+    isAvailable: false
+  }
+};
+
+export class ServiceLogField extends LogField {
+  constructor(name: string) {
+    super(name);
+    const preset = columnsNamesMap[this.name];
+    if (preset) {
+      Object.assign(this, preset);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/service-log.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/service-log.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/service-log.ts
new file mode 100644
index 0000000..2ac026c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/service-log.ts
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {Log} from '@app/classes/models/log';
+
+export interface ServiceLog extends Log {
+  path: string;
+  host: string;
+  level: string;
+  logtime: number;
+  ip: string;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/solr-collection-state.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/solr-collection-state.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/solr-collection-state.ts
new file mode 100644
index 0000000..0824dda
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/solr-collection-state.ts
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface SolrCollectionState {
+  znodeReady: boolean;
+  configurationUploaded: boolean;
+  solrCollectionReady: boolean;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/store.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/store.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/store.ts
new file mode 100644
index 0000000..c62d3ee
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/store.ts
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {Observable} from 'rxjs/Observable';
+import {Store, Action} from '@ngrx/store';
+import {AppSettings} from '@app/classes/models/app-settings';
+import {AppState} from '@app/classes/models/app-state';
+import {AuditLog} from '@app/classes/models/audit-log';
+import {ServiceLog} from '@app/classes/models/service-log';
+import {BarGraph} from '@app/classes/models/bar-graph';
+import {Graph} from '@app/classes/models/graph';
+import {Node} from '@app/classes/models/node';
+import {UserConfig} from '@app/classes/models/user-config';
+import {Filter} from '@app/classes/models/filter';
+import {AuditLogField} from '@app/classes/models/audit-log-field';
+import {ServiceLogField} from '@app/classes/models/service-log-field';
+
+export const storeActions = {
+  'ARRAY.ADD': 'ADD',
+  'ARRAY.ADD.START': 'ADD_TO_START',
+  'ARRAY.DELETE.PRIMITIVE': 'DELETE_PRIMITIVE',
+  'ARRAY.DELETE.OBJECT': 'DELETE_OBJECT',
+  'ARRAY.CLEAR': 'CLEAR',
+  'ARRAY.MAP': 'MAP',
+
+  'OBJECT.SET': 'SET'
+};
+
+export interface AppStore {
+  appSettings: AppSettings;
+  appState: AppState;
+  auditLogs: AuditLog[];
+  serviceLogs: ServiceLog[];
+  serviceLogsHistogramData: BarGraph[];
+  serviceLogsTruncated: ServiceLog[];
+  graphs: Graph[];
+  hosts: Node[];
+  userConfigs: UserConfig[];
+  filters: Filter[];
+  clusters: string[];
+  components: Node[];
+  serviceLogsFields: ServiceLogField[];
+  auditLogsFields: AuditLogField[];
+}
+
+export class ModelService {
+
+  constructor(modelName: string, store: Store<AppStore>) {
+    this.modelName = modelName;
+    this.store = store;
+  }
+
+  protected modelName: string;
+
+  protected store: Store<AppStore>;
+
+  getAll(): Observable<any> {
+    return this.store.select(this.modelName);
+  }
+
+}
+
+export class CollectionModelService extends ModelService {
+
+  addInstance(instance: any): void {
+    this.addInstances([instance]);
+  }
+
+  addInstances(instances: any[]): void {
+    this.store.dispatch({
+      type: `${storeActions['ARRAY.ADD']}_${this.modelName}`,
+      payload: instances
+    });
+  }
+
+  addInstancesToStart(instances: any[]): void {
+    this.store.dispatch({
+      type: `${storeActions['ARRAY.ADD.START']}_${this.modelName}`,
+      payload: instances
+    });
+  }
+
+  deleteObjectInstance(instance: any): void {
+    this.store.dispatch({
+      type: `${storeActions['ARRAY.DELETE.OBJECT']}_${this.modelName}`,
+      payload: instance
+    });
+  }
+
+  deletePrimitiveInstance(instance: any): void {
+    this.store.dispatch({
+      type: `${storeActions['ARRAY.DELETE.PRIMITIVE']}_${this.modelName}`,
+      payload: instance
+    });
+  }
+
+  clear(): void {
+    this.store.dispatch({
+      type: `${storeActions['ARRAY.CLEAR']}_${this.modelName}`
+    });
+  }
+
+  mapCollection(modifier: (item: any) => {}): void {
+    this.store.dispatch({
+      type: `${storeActions['ARRAY.MAP']}_${this.modelName}`,
+      payload: {
+        modifier: modifier
+      }
+    });
+  }
+
+}
+
+export class ObjectModelService extends ModelService {
+
+  getParameter(key: string): Observable<any> {
+    return this.store.select(this.modelName, key);
+  }
+
+  setParameter(key: string, value: any): void {
+    let payload = {};
+    payload[key] = value;
+    this.setParameters(payload);
+  }
+
+  setParameters(params: any): void {
+    this.store.dispatch({
+      type: `${storeActions['OBJECT.SET']}_${this.modelName}`,
+      payload: params
+    });
+  }
+
+}
+
+export function getCollectionReducer(modelName: string, defaultState: any = []): any {
+  return (state: any = defaultState, action: Action) => {
+    switch (action.type) {
+      case `${storeActions['ARRAY.ADD']}_${modelName}`:
+        return [...state, ...action.payload];
+      case `${storeActions['ARRAY.ADD.START']}_${modelName}`:
+        return [...action.payload, ...state];
+      case `${storeActions['ARRAY.DELETE.OBJECT']}_${modelName}`:
+        return state.filter(instance => instance.id !== action.payload.id);
+      case `${storeActions['ARRAY.DELETE.PRIMITIVE']}_${modelName}`:
+        return state.filter(item => item !== action.payload);
+      case `${storeActions['ARRAY.CLEAR']}_${modelName}`:
+        return [];
+      case `${storeActions['ARRAY.MAP']}_${modelName}`:
+        return state.map(action.payload.modifier);
+      default:
+        return state;
+    }
+  };
+}
+
+export function getObjectReducer(modelName: string, defaultState: any = {}) {
+  return (state: any = defaultState, action: Action): any => {
+    switch (action.type) {
+      case `${storeActions['OBJECT.SET']}_${modelName}`:
+        return Object.assign({}, state, action.payload);
+      default:
+        return state;
+    }
+  };
+}
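
To orient readers (a sketch, not part of the commit), a concrete model service is built by pairing a reducer from getCollectionReducer with a CollectionModelService subclass; ClustersService is a hypothetical name, and the 'clusters' key mirrors the AppStore interface above:

    import {Injectable} from '@angular/core';
    import {Store} from '@ngrx/store';
    import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';

    // Registered under the 'clusters' key of the root reducer map
    export const clusters = getCollectionReducer('clusters');

    @Injectable()
    export class ClustersService extends CollectionModelService {
      constructor(store: Store<AppStore>) {
        super('clusters', store); // dispatches e.g. 'ADD_clusters', 'CLEAR_clusters'
      }
    }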

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/user-config.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/user-config.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/user-config.ts
new file mode 100644
index 0000000..f52761c
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/models/user-config.ts
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface UserConfig {
+  id: string;
+  userName: string;
+  filtername: string;
+  values: string;
+  shareNameList: string[];
+  rowType: string;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/audit-logs-query-params.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/audit-logs-query-params.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/audit-logs-query-params.class.ts
deleted file mode 100644
index 3727abb..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/audit-logs-query-params.class.ts
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {QueryParams} from '@app/classes/queries/query-params.class';
-
-export const defaultParams = {
-  page: '0',
-  pageSize: '10'
-};
-
-export class AuditLogsQueryParams extends QueryParams {
-  constructor(options: AuditLogsQueryParams) {
-    let finalParams = Object.assign({}, defaultParams, options);
-    const page = parseInt(finalParams.page),
-      pageSize = parseInt(finalParams.pageSize);
-    finalParams.startIndex = isNaN(page) || isNaN(pageSize) ? '' : (page * pageSize).toString();
-    super(finalParams);
-  }
-  page: string;
-  pageSize: string;
-  startIndex: string;
-  sortBy?: string;
-  sortType?: 'asc' | 'desc';
-  clusters?: string;
-  mustBe?: string;
-  mustNot?: string;
-  includeQuery?: string;
-  excludeQuery?: string;
-  from?: string;
-  to?: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/audit-logs-query-params.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/audit-logs-query-params.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/audit-logs-query-params.ts
new file mode 100644
index 0000000..509fa04
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/audit-logs-query-params.ts
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {QueryParams} from '@app/classes/queries/query-params';
+
+export const defaultParams = {
+  page: '0',
+  pageSize: '10'
+};
+
+export class AuditLogsQueryParams extends QueryParams {
+  constructor(options: AuditLogsQueryParams) {
+    let finalParams = Object.assign({}, defaultParams, options);
+    const page = parseInt(finalParams.page),
+      pageSize = parseInt(finalParams.pageSize);
+    finalParams.startIndex = isNaN(page) || isNaN(pageSize) ? '' : (page * pageSize).toString();
+    super(finalParams);
+  }
+  page: string;
+  pageSize: string;
+  startIndex: string;
+  sortBy?: string;
+  sortType?: 'asc' | 'desc';
+  clusters?: string;
+  mustBe?: string;
+  mustNot?: string;
+  includeQuery?: string;
+  excludeQuery?: string;
+  from?: string;
+  to?: string;
+}
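
A quick sketch of the startIndex derivation (not part of the commit); the cast is only needed because the constructor reuses the class itself as its options type:

    import {AuditLogsQueryParams} from '@app/classes/queries/audit-logs-query-params';

    const params = new AuditLogsQueryParams(<AuditLogsQueryParams>{page: '2', pageSize: '25'});
    // params.startIndex === '50' (page * pageSize); non-numeric input yields ''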

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/query-params.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/query-params.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/query-params.class.ts
deleted file mode 100644
index 83c3261..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/query-params.class.ts
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export class QueryParams {
-  constructor(options: QueryParams) {
-    Object.assign(this, options);
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/query-params.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/query-params.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/query-params.ts
new file mode 100644
index 0000000..83c3261
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/query-params.ts
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export class QueryParams {
+  constructor(options: QueryParams) {
+    Object.assign(this, options);
+  }
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.class.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.class.spec.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.class.spec.ts
deleted file mode 100644
index efa2459..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.class.spec.ts
+++ /dev/null
@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {ServiceLogsHistogramQueryParams} from './service-logs-histogram-query-params.class';
-
-describe('ServiceLogsHistogramQueryParams', () => {
-
-  describe('constructor', () => {
-    const cases = [
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T00:00:00.100Z'
-        },
-        unit: '+100MILLISECOND',
-        title: 'less than 1s'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T00:00:01Z'
-        },
-        unit: '+100MILLISECOND',
-        title: '1s'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T00:00:20Z'
-        },
-        unit: '+500MILLISECOND',
-        title: 'between 1s and 30s'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T00:00:20Z'
-        },
-        unit: '+500MILLISECOND',
-        title: '30s'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T00:00:40Z'
-        },
-        unit: '+2SECOND',
-        title: 'between 30s and 1m'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T00:01:00Z'
-        },
-        unit: '+2SECOND',
-        title: '1m'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T00:20:00Z'
-        },
-        unit: '+1MINUTE',
-        title: 'between 1m and 30m'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T00:30:00Z'
-        },
-        unit: '+2MINUTE',
-        title: '30m'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T01:00:00Z'
-        },
-        unit: '+2MINUTE',
-        title: 'between 30m and 2h'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T02:00:00Z'
-        },
-        unit: '+5MINUTE',
-        title: '2h'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T04:00:00Z'
-        },
-        unit: '+5MINUTE',
-        title: 'between 2h and 6h'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T06:00:00Z'
-        },
-        unit: '+10MINUTE',
-        title: '6h'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T08:00:00Z'
-        },
-        unit: '+10MINUTE',
-        title: 'between 6h and 10h'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T10:00:00Z'
-        },
-        unit: '+10MINUTE',
-        title: '10h'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-01T22:00:00Z'
-        },
-        unit: '+1HOUR',
-        title: 'between 10h and 1d'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-02T00:00:00Z'
-        },
-        unit: '+1HOUR',
-        title: '1d'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-10T00:00:00Z'
-        },
-        unit: '+8HOUR',
-        title: 'between 1d and 15d'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-01-16T00:00:00Z'
-        },
-        unit: '+1DAY',
-        title: '15d'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-03-31T00:00:00Z'
-        },
-        unit: '+1DAY',
-        title: 'between 15d and 3M'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-04-01T00:00:00Z'
-        },
-        unit: '+1DAY',
-        title: '3M'
-      },
-      {
-        options: {
-          from: '2017-01-01T00:00:00Z',
-          to: '2017-05-01T00:00:00Z'
-        },
-        unit: '+1MONTH',
-        title: 'over 3M'
-      }
-    ];
-
-    cases.forEach(test => {
-      it(test.title, () => {
-        const paramsObject = new ServiceLogsHistogramQueryParams(test.options);
-        expect(paramsObject.unit).toEqual(test.unit);
-      });
-    });
-  });
-
-});

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.class.ts
deleted file mode 100644
index 2e2113f..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.class.ts
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {QueryParams} from '@app/classes/queries/query-params.class';
-
-export class ServiceLogsHistogramQueryParams extends QueryParams {
-  constructor(options: ServiceLogsHistogramQueryParams) {
-    let unit;
-    const diffTimeStamp = new Date(options.to).valueOf() - new Date(options.from).valueOf();
-    switch (true) {
-      case diffTimeStamp <= 1000:
-        unit = '+100MILLISECOND';
-        break;
-      case diffTimeStamp <= 30000:
-        unit = '+500MILLISECOND';
-        break;
-      case diffTimeStamp <= 60000:
-        unit = '+2SECOND';
-        break;
-      case diffTimeStamp < 1800000:
-        unit = '+1MINUTE';
-        break;
-      case diffTimeStamp < 7200000:
-        unit = '+2MINUTE';
-        break;
-      case diffTimeStamp < 21600000:
-        unit = '+5MINUTE';
-        break;
-      case diffTimeStamp <= 36000000:
-        unit = '+10MINUTE';
-        break;
-      case diffTimeStamp <= 86400000:
-        unit = '+1HOUR';
-        break;
-      case diffTimeStamp < 1296000000:
-        unit = '+8HOUR';
-        break;
-      case diffTimeStamp <= 7776000000:
-        unit = '+1DAY';
-        break;
-      default:
-        unit = '+1MONTH';
-        break;
-    }
-    options.unit = unit;
-    super(options);
-  }
-  from: string;
-  to: string;
-  unit?: string;
-  clusters?: string;
-  level?: string;
-  includeQuery?: string;
-  excludeQuery?: string;
-}
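
Assuming the renamed file below keeps this unit-selection switch (the accompanying spec expects the same buckets), a behavior sketch, not part of the commit:

    import {ServiceLogsHistogramQueryParams} from '@app/classes/queries/service-logs-histogram-query-params';

    const p = new ServiceLogsHistogramQueryParams(<ServiceLogsHistogramQueryParams>{
      from: '2017-01-01T00:00:00Z',
      to: '2017-01-01T12:00:00Z'
    });
    // 12h is above 10h and at most 1d, so p.unit === '+1HOUR'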

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.spec.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.spec.ts
new file mode 100644
index 0000000..eb891ed
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.spec.ts
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {ServiceLogsHistogramQueryParams} from './service-logs-histogram-query-params';
+
+describe('ServiceLogsHistogramQueryParams', () => {
+
+  describe('constructor', () => {
+    const cases = [
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T00:00:00.100Z'
+        },
+        unit: '+100MILLISECOND',
+        title: 'less than 1s'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T00:00:01Z'
+        },
+        unit: '+100MILLISECOND',
+        title: '1s'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T00:00:20Z'
+        },
+        unit: '+500MILLISECOND',
+        title: 'between 1s and 30s'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T00:00:30Z'
+        },
+        unit: '+500MILLISECOND',
+        title: '30s'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T00:00:40Z'
+        },
+        unit: '+2SECOND',
+        title: 'between 30s and 1m'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T00:01:00Z'
+        },
+        unit: '+2SECOND',
+        title: '1m'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T00:20:00Z'
+        },
+        unit: '+1MINUTE',
+        title: 'between 1m and 30m'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T00:30:00Z'
+        },
+        unit: '+2MINUTE',
+        title: '30m'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T01:00:00Z'
+        },
+        unit: '+2MINUTE',
+        title: 'between 30m and 2h'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T02:00:00Z'
+        },
+        unit: '+5MINUTE',
+        title: '2h'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T04:00:00Z'
+        },
+        unit: '+5MINUTE',
+        title: 'between 2h and 6h'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T06:00:00Z'
+        },
+        unit: '+10MINUTE',
+        title: '6h'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T08:00:00Z'
+        },
+        unit: '+10MINUTE',
+        title: 'between 6h and 10h'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T10:00:00Z'
+        },
+        unit: '+10MINUTE',
+        title: '10h'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-01T22:00:00Z'
+        },
+        unit: '+1HOUR',
+        title: 'between 10h and 1d'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-02T00:00:00Z'
+        },
+        unit: '+1HOUR',
+        title: '1d'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-10T00:00:00Z'
+        },
+        unit: '+8HOUR',
+        title: 'between 1d and 15d'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-01-16T00:00:00Z'
+        },
+        unit: '+1DAY',
+        title: '15d'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-03-31T00:00:00Z'
+        },
+        unit: '+1DAY',
+        title: 'between 15d and 3M'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-04-01T00:00:00Z'
+        },
+        unit: '+1DAY',
+        title: '3M'
+      },
+      {
+        options: {
+          from: '2017-01-01T00:00:00Z',
+          to: '2017-05-01T00:00:00Z'
+        },
+        unit: '+1MONTH',
+        title: 'over 3M'
+      }
+    ];
+
+    cases.forEach(test => {
+      it(test.title, () => {
+        const paramsObject = new ServiceLogsHistogramQueryParams(test.options);
+        expect(paramsObject.unit).toEqual(test.unit);
+      });
+    });
+  });
+
+});

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.ts
new file mode 100644
index 0000000..148aa62
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-histogram-query-params.ts
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {QueryParams} from '@app/classes/queries/query-params';
+
+export class ServiceLogsHistogramQueryParams extends QueryParams {
+  constructor(options: ServiceLogsHistogramQueryParams) {
+    let unit;
+    const diffTimeStamp = new Date(options.to).valueOf() - new Date(options.from).valueOf();
+    switch (true) {
+      case diffTimeStamp <= 1000:
+        unit = '+100MILLISECOND';
+        break;
+      case diffTimeStamp <= 30000:
+        unit = '+500MILLISECOND';
+        break;
+      case diffTimeStamp <= 60000:
+        unit = '+2SECOND';
+        break;
+      case diffTimeStamp < 1800000:
+        unit = '+1MINUTE';
+        break;
+      case diffTimeStamp < 7200000:
+        unit = '+2MINUTE';
+        break;
+      case diffTimeStamp < 21600000:
+        unit = '+5MINUTE';
+        break;
+      case diffTimeStamp <= 36000000:
+        unit = '+10MINUTE';
+        break;
+      case diffTimeStamp <= 86400000:
+        unit = '+1HOUR';
+        break;
+      case diffTimeStamp < 1296000000:
+        unit = '+8HOUR';
+        break;
+      case diffTimeStamp <= 7776000000:
+        unit = '+1DAY';
+        break;
+      default:
+        unit = '+1MONTH';
+        break;
+    }
+    options.unit = unit;
+    super(options);
+  }
+  from: string;
+  to: string;
+  unit?: string;
+  clusters?: string;
+  level?: string;
+  includeQuery?: string;
+  excludeQuery?: string;
+}
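
For quick reference, here is a minimal standalone sketch of the bucket-size
selection implemented by the switch(true) ladder above. pickHistogramUnit is
a hypothetical helper written for illustration only and is not part of this
patch; its millisecond thresholds mirror the constructor exactly:

    function pickHistogramUnit(from: string, to: string): string {
      const diff = new Date(to).valueOf() - new Date(from).valueOf();
      if (diff <= 1000)       { return '+100MILLISECOND'; } // up to 1s
      if (diff <= 30000)      { return '+500MILLISECOND'; } // up to 30s
      if (diff <= 60000)      { return '+2SECOND'; }        // up to 1m
      if (diff < 1800000)     { return '+1MINUTE'; }        // under 30m
      if (diff < 7200000)     { return '+2MINUTE'; }        // under 2h
      if (diff < 21600000)    { return '+5MINUTE'; }        // under 6h
      if (diff <= 36000000)   { return '+10MINUTE'; }       // up to 10h
      if (diff <= 86400000)   { return '+1HOUR'; }          // up to 1d
      if (diff < 1296000000)  { return '+8HOUR'; }          // under 15d
      if (diff <= 7776000000) { return '+1DAY'; }           // up to ~3 months
      return '+1MONTH';                                     // longer ranges
    }

    // e.g. a one-hour window falls into the "under 2h" bucket,
    // matching the 'between 30m and 2h' case in the spec above:
    pickHistogramUnit('2017-01-01T00:00:00Z', '2017-01-01T01:00:00Z'); // '+2MINUTE'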

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-query-params.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-query-params.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-query-params.class.ts
deleted file mode 100644
index 864b689..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-query-params.class.ts
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {AuditLogsQueryParams} from '@app/classes/queries/audit-logs-query-params.class';
-
-export class ServiceLogsQueryParams extends AuditLogsQueryParams {
-  level?: string;
-  file_name?: string;
-  bundle_id?: string;
-  hostList?: string;
-  find?: string;
-  sourceLogId?: string;
-  keywordType?: string;
-  token?: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-query-params.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-query-params.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-query-params.ts
new file mode 100644
index 0000000..0700a98
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-query-params.ts
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {AuditLogsQueryParams} from '@app/classes/queries/audit-logs-query-params';
+
+export class ServiceLogsQueryParams extends AuditLogsQueryParams {
+  level?: string;
+  file_name?: string;
+  bundle_id?: string;
+  hostList?: string;
+  find?: string;
+  sourceLogId?: string;
+  keywordType?: string;
+  token?: string;
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-truncated-query-params.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-truncated-query-params.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-truncated-query-params.class.ts
deleted file mode 100644
index da05cee..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-truncated-query-params.class.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {QueryParams} from '@app/classes/queries/query-params.class';
-
-export const defaultParams = {
-  numberRows: '10',
-  scrollType: ''
-};
-
-export class ServiceLogsTruncatedQueryParams extends QueryParams {
-  constructor(options: ServiceLogsTruncatedQueryParams) {
-    const finalParams = Object.assign({}, defaultParams, options);
-    super(finalParams);
-  }
-  id: string;
-  host_name: string;
-  component_name: string;
-  numberRows: string;
-  scrollType: 'before' | 'after' | '';
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-truncated-query-params.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-truncated-query-params.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-truncated-query-params.ts
new file mode 100644
index 0000000..6f9de16
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/queries/service-logs-truncated-query-params.ts
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import {QueryParams} from '@app/classes/queries/query-params';
+
+export const defaultParams = {
+  numberRows: '10',
+  scrollType: ''
+};
+
+export class ServiceLogsTruncatedQueryParams extends QueryParams {
+  constructor(options: ServiceLogsTruncatedQueryParams) {
+    const finalParams = Object.assign({}, defaultParams, options);
+    super(finalParams);
+  }
+  id: string;
+  host_name: string;
+  component_name: string;
+  numberRows: string;
+  scrollType: 'before' | 'after' | '';
+}
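
As a usage sketch: with the Object.assign merge above, caller-supplied options
override defaultParams. The id/host/component values below are made up for
illustration, and this assumes QueryParams copies the option values onto the
instance, as the histogram spec earlier in this patch relies on:

    const params = new ServiceLogsTruncatedQueryParams({
      id: 'log_entry_0',
      host_name: 'c6401.ambari.apache.org',
      component_name: 'hdfs_namenode',
      numberRows: '25',      // later sources win: overrides the default '10'
      scrollType: 'before'
    });
    // params.numberRows === '25'; params.scrollType === 'before'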

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/service-log-context-entry.class.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/service-log-context-entry.class.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/service-log-context-entry.class.ts
deleted file mode 100644
index 15c05fb..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/classes/service-log-context-entry.class.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface ServiceLogContextEntry {
-  id: string;
-  time: number;
-  level: string;
-  message: string;
-  fileName: string | null;
-  lineNumber: number | null;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/classes/service-log-context-entry.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/classes/service-log-context-entry.ts b/ambari-logsearch/ambari-logsearch-web/src/app/classes/service-log-context-entry.ts
new file mode 100644
index 0000000..15c05fb
--- /dev/null
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/classes/service-log-context-entry.ts
@@ -0,0 +1,26 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export interface ServiceLogContextEntry {
+  id: string;
+  time: number;
+  level: string;
+  message: string;
+  fileName: string | null;
+  lineNumber: number | null;
+}
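
An object satisfying the ServiceLogContextEntry contract above could look
like the following (all values are illustrative; time is assumed to be an
epoch timestamp in milliseconds):

    const entry: ServiceLogContextEntry = {
      id: 'log_0',
      time: 1507532459000,           // assumed epoch milliseconds
      level: 'ERROR',
      message: 'Connection refused',
      fileName: 'NameNode.java',     // string | null
      lineNumber: 42                 // number | null
    };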

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-button/dropdown-button.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-button/dropdown-button.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-button/dropdown-button.component.ts
index 43d79f8..0bf4422 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-button/dropdown-button.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-button/dropdown-button.component.ts
@@ -17,7 +17,7 @@
  */
 
 import {Component, OnInit, Input} from '@angular/core';
-import {ListItem} from '@app/classes/list-item.class';
+import {ListItem} from '@app/classes/list-item';
 import {ComponentActionsService} from '@app/services/component-actions.service';
 import {UtilsService} from '@app/services/utils.service';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-list/dropdown-list.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-list/dropdown-list.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-list/dropdown-list.component.ts
index 656c901..ef185d0 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-list/dropdown-list.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/dropdown-list/dropdown-list.component.ts
@@ -17,7 +17,7 @@
  */
 
 import {Component, AfterViewInit, Input, Output, EventEmitter, ViewChildren, ViewContainerRef, QueryList} from '@angular/core';
-import {ListItem} from '@app/classes/list-item.class';
+import {ListItem} from '@app/classes/list-item';
 import {ComponentGeneratorService} from '@app/services/component-generator.service';
 import {ComponentActionsService} from '@app/services/component-actions.service';
 


[33/50] [abbrv] ambari git commit: AMBARI-22124. Refactor AMS logic in stack advisors to service advisors.(vbrodetskyi)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py b/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
new file mode 100644
index 0000000..05254fe
--- /dev/null
+++ b/ambari-server/src/test/python/common-services/AMBARI_METRICS/test_service_advisor.py
@@ -0,0 +1,596 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import imp
+import json
+import os
+from unittest import TestCase
+
+from mock.mock import patch, MagicMock
+
+
+class TestAMBARI_METRICS010ServiceAdvisor(TestCase):
+
+  testDirectory = os.path.dirname(os.path.abspath(__file__))
+  stack_advisor_path = os.path.join(testDirectory, '../../../../main/resources/stacks/stack_advisor.py')
+  with open(stack_advisor_path, 'rb') as fp:
+    imp.load_module('stack_advisor', fp, stack_advisor_path, ('.py', 'rb', imp.PY_SOURCE))
+
+  serviceAdvisorPath = '../../../../main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py'
+  ambariMetrics010ServiceAdvisorPath = os.path.join(testDirectory, serviceAdvisorPath)
+  with open(ambariMetrics010ServiceAdvisorPath, 'rb') as fp:
+    service_advisor_impl = imp.load_module('service_advisor_impl', fp, ambariMetrics010ServiceAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+
+  def setUp(self):
+    serviceAdvisorClass = getattr(self.service_advisor_impl, 'AMBARI_METRICSServiceAdvisor')
+    self.serviceAdvisor = serviceAdvisorClass()
+
+
+  def test_recommendAmsConfigurations(self):
+    configurations = {
+      "hadoop-env": {
+        "properties": {
+          "hdfs_user": "hdfs",
+          "proxyuser_group": "users"
+        }
+      }
+    }
+
+    hosts = {
+      "items": [
+        {
+          "href": "/api/v1/hosts/host1",
+          "Hosts": {
+            "cpu_count": 1,
+            "host_name": "c6401.ambari.apache.org",
+            "os_arch": "x86_64",
+            "os_type": "centos6",
+            "ph_cpu_count": 1,
+            "public_host_name": "public.c6401.ambari.apache.org",
+            "rack_info": "/default-rack",
+            "total_mem": 2097152,
+            "disk_info": [{
+              "size": '80000000',
+              "mountpoint": "/"
+            }]
+          }
+        },
+        {
+          "href": "/api/v1/hosts/host2",
+          "Hosts": {
+            "cpu_count": 1,
+            "host_name": "c6402.ambari.apache.org",
+            "os_arch": "x86_64",
+            "os_type": "centos6",
+            "ph_cpu_count": 1,
+            "public_host_name": "public.c6402.ambari.apache.org",
+            "rack_info": "/default-rack",
+            "total_mem": 1048576,
+            "disk_info": [{
+              "size": '800000000',
+              "mountpoint": "/"
+            }]
+          }
+        }
+      ]}
+
+
+    services1 = {
+      "services": [
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          }, "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NAMENODE",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+        },
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
+              }
+            }
+          ]
+        }],
+      "configurations": configurations,
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
+    }
+
+    clusterData = {
+      "totalAvailableRam": 2048
+    }
+
+    expected = {'ams-env': {'properties': {'metrics_collector_heapsize': '512'}},
+                  'ams-grafana-env': {'properties': {},
+                                                             'property_attributes': {'metrics_grafana_password': {'visible': 'false'}}},
+                  'ams-hbase-env': {'properties': {'hbase_log_dir': '/var/log/ambari-metrics-collector',
+                                                                                       'hbase_master_heapsize': '512',
+                                                                                       'hbase_master_xmn_size': '102',
+                                                                                       'hbase_regionserver_heapsize': '1024',
+                                                                                       'regionserver_xmn_size': '128'}},
+                  'ams-hbase-site': {'properties': {'hbase.cluster.distributed': 'true',
+                                                                                         'hbase.hregion.memstore.flush.size': '134217728',
+                                                                                         'hbase.regionserver.global.memstore.lowerLimit': '0.3',
+                                                                                         'hbase.regionserver.global.memstore.upperLimit': '0.35',
+                                                                                         'hbase.rootdir': '/user/ams/hbase',
+                                                                                         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
+                                                                                         'hbase.zookeeper.property.clientPort': '2181',
+                                                                                         'hfile.block.cache.size': '0.3'}},
+                  'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
+                                                                             'timeline.metrics.cache.size': '100',
+                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.service.handler.thread.count': '20',
+                                                                             'timeline.metrics.service.operation.mode': 'distributed',
+                                                                             'timeline.metrics.service.watcher.disabled': 'true',
+                                                                             'timeline.metrics.service.webapp.address': '0.0.0.0:6188'}},
+                  'hadoop-env': {'properties': {'hdfs_user': 'hdfs',
+                                                                                 'proxyuser_group': 'users'}}}
+
+    self.serviceAdvisor.getServiceConfigurationRecommendations(configurations, clusterData, services1, hosts)
+    self.assertEquals(configurations, expected)
+
+    services1 = {
+      "services": [
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          }, "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "NAMENODE",
+              "hostnames": ["c6401.ambari.apache.org"]
+            }
+          }
+        ]
+        },
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["c6401.ambari.apache.org"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
+              }
+            }
+          ]
+        }],
+      "configurations": configurations,
+      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
+    }
+    expected = {'ams-env': {'properties': {'metrics_collector_heapsize': '512'}},
+                  'ams-grafana-env': {'properties': {},
+                                                             'property_attributes': {'metrics_grafana_password': {'visible': 'false'}}},
+                  'ams-hbase-env': {'properties': {'hbase_log_dir': '/var/log/ambari-metrics-collector',
+                                                                                       'hbase_master_heapsize': '512',
+                                                                                       'hbase_master_xmn_size': '102',
+                                                                                       'hbase_regionserver_heapsize': '1024',
+                                                                                       'regionserver_xmn_size': '128'}},
+                  'ams-hbase-site': {'properties': {'hbase.cluster.distributed': 'true',
+                                                                                         'hbase.hregion.memstore.flush.size': '134217728',
+                                                                                         'hbase.regionserver.global.memstore.lowerLimit': '0.3',
+                                                                                         'hbase.regionserver.global.memstore.upperLimit': '0.35',
+                                                                                         'hbase.rootdir': '/user/ams/hbase',
+                                                                                         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
+                                                                                         'hbase.zookeeper.property.clientPort': '2181',
+                                                                                         'hfile.block.cache.size': '0.3',
+                                                                                         'phoenix.coprocessor.maxMetaDataCacheSize': '20480000'}},
+                  'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
+                                                                             'timeline.metrics.cache.size': '100',
+                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
+                                                                             'timeline.metrics.service.handler.thread.count': '20',
+                                                                             'timeline.metrics.service.operation.mode': 'distributed',
+                                                                             'timeline.metrics.service.watcher.disabled': 'true',
+                                                                             'timeline.metrics.service.webapp.address': '0.0.0.0:6188'}},
+                  'hadoop-env': {'properties': {'hdfs_user': 'hdfs',
+                                                                                 'proxyuser_group': 'users'}}}
+    self.serviceAdvisor.getServiceConfigurationRecommendations(configurations, clusterData, services1, hosts)
+    self.assertEquals(configurations, expected)
+
+
+  def test_validateAmsSiteConfigurations(self):
+    configurations = {
+      "hdfs-site": {
+        "properties": {
+          'dfs.datanode.data.dir': "/hadoop/data"
+        }
+      },
+      "core-site": {
+        "properties": {
+          "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
+        }
+      },
+      "ams-site": {
+        "properties": {
+          "timeline.metrics.service.operation.mode": "embedded"
+        }
+      }
+    }
+    recommendedDefaults = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    properties = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false',
+      'timeline.metrics.service.operation.mode' : 'embedded'
+    }
+    host1 = {
+      "href" : "/api/v1/hosts/host1",
+      "Hosts" : {
+        "cpu_count" : 1,
+        "host_name" : "host1",
+        "os_arch" : "x86_64",
+        "os_type" : "centos6",
+        "ph_cpu_count" : 1,
+        "public_host_name" : "host1",
+        "rack_info" : "/default-rack",
+        "total_mem" : 2097152,
+        "disk_info": [
+          {
+            "available": str(15<<30), # 15 GB
+            "type": "ext4",
+            "mountpoint": "/"
+          }
+        ]
+      }
+    }
+    host2 = {
+      "href" : "/api/v1/hosts/host2",
+      "Hosts" : {
+        "cpu_count" : 1,
+        "host_name" : "host2",
+        "os_arch" : "x86_64",
+        "os_type" : "centos6",
+        "ph_cpu_count" : 1,
+        "public_host_name" : "host2",
+        "rack_info" : "/default-rack",
+        "total_mem" : 2097152,
+        "disk_info": [
+          {
+            "available": str(15<<30), # 15 GB
+            "type": "ext4",
+            "mountpoint": "/"
+          }
+        ]
+      }
+    }
+
+    hosts = {
+      "items" : [
+        host1, host2
+      ]
+    }
+
+    services = {
+      "services":  [
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["host1", "host2"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["host1", "host2"]
+              }
+            }
+          ]
+        },
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "DATANODE",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        }
+      ],
+      "configurations": configurations
+    }
+    # only 1 partition, enough disk space, no warnings
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = [{'config-name': 'timeline.metrics.service.operation.mode',
+                    'config-type': 'ams-site',
+                    'level': 'ERROR',
+                    'message': "Correct value should be 'distributed' for clusters with more then 1 Metrics collector",
+                    'type': 'configuration'}]
+    self.assertEquals(res, expected)
+
+
+    services = {
+      "services":  [
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["host1"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        },
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "DATANODE",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        }
+      ],
+      "configurations": configurations
+    }
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = []
+    self.assertEquals(res, expected)
+
+  def test_validateAmsHbaseSiteConfigurations(self):
+    configurations = {
+      "hdfs-site": {
+        "properties": {
+          'dfs.datanode.data.dir': "/hadoop/data"
+        }
+      },
+      "core-site": {
+        "properties": {
+          "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
+        }
+      },
+      "ams-site": {
+        "properties": {
+          "timeline.metrics.service.operation.mode": "embedded"
+        }
+      }
+    }
+
+    recommendedDefaults = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    properties = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    host = {
+      "href" : "/api/v1/hosts/host1",
+      "Hosts" : {
+        "cpu_count" : 1,
+        "host_name" : "host1",
+        "os_arch" : "x86_64",
+        "os_type" : "centos6",
+        "ph_cpu_count" : 1,
+        "public_host_name" : "host1",
+        "rack_info" : "/default-rack",
+        "total_mem" : 2097152,
+        "disk_info": [
+          {
+            "available": str(15<<30), # 15 GB
+            "type": "ext4",
+            "mountpoint": "/"
+          }
+        ]
+      }
+    }
+
+    hosts = {
+      "items" : [
+        host
+      ]
+    }
+
+    services = {
+      "services":  [
+        {
+          "StackServices": {
+            "service_name": "AMBARI_METRICS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "METRICS_COLLECTOR",
+                "hostnames": ["host1"]
+              }
+            }, {
+              "StackServiceComponents": {
+                "component_name": "METRICS_MONITOR",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        },
+        {
+          "StackServices": {
+            "service_name": "HDFS"
+          },
+          "components": [
+            {
+              "StackServiceComponents": {
+                "component_name": "DATANODE",
+                "hostnames": ["host1"]
+              }
+            }
+          ]
+        }
+      ],
+      "configurations": configurations
+    }
+
+    # only 1 partition, enough disk space, no warnings
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = []
+    self.assertEquals(res, expected)
+
+
+    # 1 partition, not enough disk space
+    host['Hosts']['disk_info'] = [
+      {
+        "available" : '1',
+        "type" : "ext4",
+        "mountpoint" : "/"
+      }
+    ]
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = [
+      {'config-name': 'hbase.rootdir',
+       'config-type': 'ams-hbase-site',
+       'level': 'WARN',
+       'message': 'Ambari Metrics disk space requirements not met. '
+                  '\nRecommended disk space for partition / is 10G',
+       'type': 'configuration'
+      }
+    ]
+    self.assertEquals(res, expected)
+
+    # 2 partitions
+    host['Hosts']['disk_info'] = [
+      {
+        "available": str(15<<30), # 15 GB
+        "type" : "ext4",
+        "mountpoint" : "/grid/0"
+      },
+      {
+        "available" : str(15<<30), # 15 GB
+        "type" : "ext4",
+        "mountpoint" : "/"
+      }
+    ]
+    recommendedDefaults = {
+      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    properties = {
+      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = []
+    self.assertEquals(res, expected)
+
+    # dfs.dir & hbase.rootdir crosscheck + root partition + hbase.rootdir == hbase.tmp.dir warnings
+    properties = {
+      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = [
+      {
+        'config-name': 'hbase.rootdir',
+        'config-type': 'ams-hbase-site',
+        'level': 'WARN',
+        'message': 'It is not recommended to use root partition for hbase.rootdir',
+        'type': 'configuration'
+      },
+      {
+        'config-name': 'hbase.tmp.dir',
+        'config-type': 'ams-hbase-site',
+        'level': 'WARN',
+        'message': 'Consider not using / partition for storing metrics temporary data. '
+                   '/ partition is already used as hbase.rootdir to store metrics data',
+        'type': 'configuration'
+      },
+      {
+        'config-name': 'hbase.rootdir',
+        'config-type': 'ams-hbase-site',
+        'level': 'WARN',
+        'message': 'Consider not using / partition for storing metrics data. '
+                   '/ is already used by datanode to store HDFS data',
+        'type': 'configuration'
+      }
+    ]
+    self.assertEquals(res, expected)
+
+    # incorrect hbase.rootdir in distributed mode
+    properties = {
+      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
+      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
+      'hbase.cluster.distributed': 'false'
+    }
+    configurations['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
+    res = self.serviceAdvisor.getAMBARI_METRICSValidator().validateAmsHbaseSiteConfigurationsFromHDP206(properties, recommendedDefaults, configurations, services, hosts)
+    expected = [
+      {
+        'config-name': 'hbase.rootdir',
+        'config-type': 'ams-hbase-site',
+        'level': 'WARN',
+        'message': 'In distributed mode hbase.rootdir should point to HDFS.',
+        'type': 'configuration'
+      },
+      {
+        'config-name': 'hbase.cluster.distributed',
+        'config-type': 'ams-hbase-site',
+        'level': 'ERROR',
+        'message': 'hbase.cluster.distributed property should be set to true for distributed mode',
+        'type': 'configuration'
+      }
+    ]
+    self.assertEquals(res, expected)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
index 65b23b0..38d6ecd 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py
@@ -2315,214 +2315,6 @@ class TestHDP206StackAdvisor(TestCase):
 
 
 
-  def test_recommendAmsConfigurations(self):
-    configurations = {
-      "hadoop-env": {
-        "properties": {
-          "hdfs_user": "hdfs",
-          "proxyuser_group": "users"
-        }
-      }
-    }
-
-    hosts = {
-      "items": [
-        {
-          "href": "/api/v1/hosts/host1",
-          "Hosts": {
-            "cpu_count": 1,
-            "host_name": "c6401.ambari.apache.org",
-            "os_arch": "x86_64",
-            "os_type": "centos6",
-            "ph_cpu_count": 1,
-            "public_host_name": "public.c6401.ambari.apache.org",
-            "rack_info": "/default-rack",
-            "total_mem": 2097152,
-            "disk_info": [{
-              "size": '80000000',
-              "mountpoint": "/"
-            }]
-          }
-        },
-        {
-          "href": "/api/v1/hosts/host2",
-          "Hosts": {
-            "cpu_count": 1,
-            "host_name": "c6402.ambari.apache.org",
-            "os_arch": "x86_64",
-            "os_type": "centos6",
-            "ph_cpu_count": 1,
-            "public_host_name": "public.c6402.ambari.apache.org",
-            "rack_info": "/default-rack",
-            "total_mem": 1048576,
-            "disk_info": [{
-              "size": '800000000',
-              "mountpoint": "/"
-            }]
-          }
-        }
-      ]}
-
-
-    services1 = {
-      "services": [
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          }, "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NAMENODE",
-              "hostnames": ["c6401.ambari.apache.org"]
-            }
-          }
-        ]
-        },
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
-              }
-            }
-          ]
-        }],
-      "configurations": configurations,
-      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
-    }
-
-    clusterData = {
-      "totalAvailableRam": 2048
-    }
-
-    expected = {'ams-env': {'properties': {'metrics_collector_heapsize': '512'}},
-                  'ams-grafana-env': {'properties': {},
-                                                             'property_attributes': {'metrics_grafana_password': {'visible': 'false'}}},
-                  'ams-hbase-env': {'properties': {'hbase_log_dir': '/var/log/ambari-metrics-collector',
-                                                                                       'hbase_master_heapsize': '512',
-                                                                                       'hbase_master_xmn_size': '102',
-                                                                                       'hbase_regionserver_heapsize': '1024',
-                                                                                       'regionserver_xmn_size': '128'}},
-                  'ams-hbase-site': {'properties': {'hbase.cluster.distributed': 'true',
-                                                                                         'hbase.hregion.memstore.flush.size': '134217728',
-                                                                                         'hbase.regionserver.global.memstore.lowerLimit': '0.3',
-                                                                                         'hbase.regionserver.global.memstore.upperLimit': '0.35',
-                                                                                         'hbase.rootdir': '/user/ams/hbase',
-                                                                                         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
-                                                                                         'hbase.zookeeper.property.clientPort': '2181',
-                                                                                         'hfile.block.cache.size': '0.3'}},
-                  'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
-                                                                             'timeline.metrics.cache.size': '100',
-                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.service.handler.thread.count': '20',
-                                                                             'timeline.metrics.service.operation.mode': 'distributed',
-                                                                             'timeline.metrics.service.watcher.disabled': 'true',
-                                                                             'timeline.metrics.service.webapp.address': '0.0.0.0:6188'}},
-                  'hadoop-env': {'properties': {'hdfs_user': 'hdfs',
-                                                                                 'proxyuser_group': 'users'}}}
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services1, hosts)
-    self.assertEquals(configurations, expected)
-
-    services1 = {
-      "services": [
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          }, "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NAMENODE",
-              "hostnames": ["c6401.ambari.apache.org"]
-            }
-          }
-        ]
-        },
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["c6401.ambari.apache.org"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
-              }
-            }
-          ]
-        }],
-      "configurations": configurations,
-      "ambari-server-properties": {"ambari-server.user":"ambari_user"}
-    }
-    expected = {'ams-env': {'properties': {'metrics_collector_heapsize': '512'}},
-                  'ams-grafana-env': {'properties': {},
-                                                             'property_attributes': {'metrics_grafana_password': {'visible': 'false'}}},
-                  'ams-hbase-env': {'properties': {'hbase_log_dir': '/var/log/ambari-metrics-collector',
-                                                                                       'hbase_master_heapsize': '512',
-                                                                                       'hbase_master_xmn_size': '102',
-                                                                                       'hbase_regionserver_heapsize': '1024',
-                                                                                       'regionserver_xmn_size': '128'}},
-                  'ams-hbase-site': {'properties': {'hbase.cluster.distributed': 'true',
-                                                                                         'hbase.hregion.memstore.flush.size': '134217728',
-                                                                                         'hbase.regionserver.global.memstore.lowerLimit': '0.3',
-                                                                                         'hbase.regionserver.global.memstore.upperLimit': '0.35',
-                                                                                         'hbase.rootdir': '/user/ams/hbase',
-                                                                                         'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase-tmp',
-                                                                                         'hbase.zookeeper.property.clientPort': '2181',
-                                                                                         'hfile.block.cache.size': '0.3',
-                                                                                         'phoenix.coprocessor.maxMetaDataCacheSize': '20480000'}},
-                  'ams-site': {'properties': {'timeline.metrics.cache.commit.interval': '10',
-                                                                             'timeline.metrics.cache.size': '100',
-                                                                             'timeline.metrics.cluster.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.host.aggregate.splitpoints': 'master.Balancer.BalancerCluster_95th_percentile',
-                                                                             'timeline.metrics.service.handler.thread.count': '20',
-                                                                             'timeline.metrics.service.operation.mode': 'distributed',
-                                                                             'timeline.metrics.service.watcher.disabled': 'true',
-                                                                             'timeline.metrics.service.webapp.address': '0.0.0.0:6188'}},
-                  'hadoop-env': {'properties': {'hdfs_user': 'hdfs',
-                                                                                 'proxyuser_group': 'users'}}}
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services1, hosts)
-    self.assertEquals(configurations, expected)
-
-  def test_getHostNamesWithComponent(self):
-
-    services = {
-      "services":  [
-        {
-          "StackServices": {
-            "service_name": "SERVICE"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "COMPONENT",
-                "hostnames": ["host1","host2","host3"]
-              }
-            }
-          ]
-        }
-      ],
-      "configurations": {}
-    }
-
-    result = self.stackAdvisor.getHostNamesWithComponent("SERVICE","COMPONENT", services)
-    expected = ["host1","host2","host3"]
-    self.assertEquals(result, expected)
 
 
   def test_getZKHostPortString(self):
@@ -2908,374 +2700,6 @@ class TestHDP206StackAdvisor(TestCase):
     validation_problems = self.stackAdvisor.validateHDFSConfigurations(properties, recommendedDefaults, configurations, services, hosts)
     self.assertEquals(validation_problems, expected)
 
-  def test_validateAmsSiteConfigurations(self):
-    configurations = {
-      "hdfs-site": {
-        "properties": {
-          'dfs.datanode.data.dir': "/hadoop/data"
-        }
-      },
-      "core-site": {
-        "properties": {
-          "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
-        }
-      },
-      "ams-site": {
-        "properties": {
-          "timeline.metrics.service.operation.mode": "embedded"
-        }
-      }
-    }
-    recommendedDefaults = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    properties = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false',
-      'timeline.metrics.service.operation.mode' : 'embedded'
-    }
-    host1 = {
-      "href" : "/api/v1/hosts/host1",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "host_name" : "host1",
-        "os_arch" : "x86_64",
-        "os_type" : "centos6",
-        "ph_cpu_count" : 1,
-        "public_host_name" : "host1",
-        "rack_info" : "/default-rack",
-        "total_mem" : 2097152,
-        "disk_info": [
-          {
-            "available": str(15<<30), # 15 GB
-            "type": "ext4",
-            "mountpoint": "/"
-          }
-        ]
-      }
-    }
-    host2 = {
-      "href" : "/api/v1/hosts/host2",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "host_name" : "host2",
-        "os_arch" : "x86_64",
-        "os_type" : "centos6",
-        "ph_cpu_count" : 1,
-        "public_host_name" : "host2",
-        "rack_info" : "/default-rack",
-        "total_mem" : 2097152,
-        "disk_info": [
-          {
-            "available": str(15<<30), # 15 GB
-            "type": "ext4",
-            "mountpoint": "/"
-          }
-        ]
-      }
-    }
-
-    hosts = {
-      "items" : [
-        host1, host2
-      ]
-    }
-
-    services = {
-      "services":  [
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["host1", "host2"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["host1", "host2"]
-              }
-            }
-          ]
-        },
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "DATANODE",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        }
-      ],
-      "configurations": configurations
-    }
-    # only 1 partition, enough disk space, no warnings
-    res = self.stackAdvisor.validateAmsSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = [{'config-name': 'timeline.metrics.service.operation.mode',
-                    'config-type': 'ams-site',
-                    'level': 'ERROR',
-                    'message': "Correct value should be 'distributed' for clusters with more then 1 Metrics collector",
-                    'type': 'configuration'}]
-    self.assertEquals(res, expected)
-
-
-    services = {
-      "services":  [
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["host1"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        },
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "DATANODE",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        }
-      ],
-      "configurations": configurations
-    }
-    res = self.stackAdvisor.validateAmsSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = []
-    self.assertEquals(res, expected)
-
-  def test_validateAmsHbaseSiteConfigurations(self):
-    configurations = {
-      "hdfs-site": {
-        "properties": {
-          'dfs.datanode.data.dir': "/hadoop/data"
-        }
-      },
-      "core-site": {
-        "properties": {
-          "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
-        }
-      },
-      "ams-site": {
-        "properties": {
-          "timeline.metrics.service.operation.mode": "embedded"
-        }
-      }
-    }
-
-    recommendedDefaults = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    properties = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    host = {
-      "href" : "/api/v1/hosts/host1",
-      "Hosts" : {
-        "cpu_count" : 1,
-        "host_name" : "host1",
-        "os_arch" : "x86_64",
-        "os_type" : "centos6",
-        "ph_cpu_count" : 1,
-        "public_host_name" : "host1",
-        "rack_info" : "/default-rack",
-        "total_mem" : 2097152,
-        "disk_info": [
-          {
-            "available": str(15<<30), # 15 GB
-            "type": "ext4",
-            "mountpoint": "/"
-          }
-        ]
-      }
-    }
-
-    hosts = {
-      "items" : [
-        host
-      ]
-    }
-
-    services = {
-      "services":  [
-        {
-          "StackServices": {
-            "service_name": "AMBARI_METRICS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "METRICS_COLLECTOR",
-                "hostnames": ["host1"]
-              }
-            }, {
-              "StackServiceComponents": {
-                "component_name": "METRICS_MONITOR",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        },
-        {
-          "StackServices": {
-            "service_name": "HDFS"
-          },
-          "components": [
-            {
-              "StackServiceComponents": {
-                "component_name": "DATANODE",
-                "hostnames": ["host1"]
-              }
-            }
-          ]
-        }
-      ],
-      "configurations": configurations
-    }
-
-    # only 1 partition, enough disk space, no warnings
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = []
-    self.assertEquals(res, expected)
-
-
-    # 1 partition, not enough disk space
-    host['Hosts']['disk_info'] = [
-      {
-        "available" : '1',
-        "type" : "ext4",
-        "mountpoint" : "/"
-      }
-    ]
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = [
-      {'config-name': 'hbase.rootdir',
-       'config-type': 'ams-hbase-site',
-       'level': 'WARN',
-       'message': 'Ambari Metrics disk space requirements not met. '
-                  '\nRecommended disk space for partition / is 10G',
-       'type': 'configuration'
-      }
-    ]
-    self.assertEquals(res, expected)
-
-    # 2 partitions
-    host['Hosts']['disk_info'] = [
-      {
-        "available": str(15<<30), # 15 GB
-        "type" : "ext4",
-        "mountpoint" : "/grid/0"
-      },
-      {
-        "available" : str(15<<30), # 15 GB
-        "type" : "ext4",
-        "mountpoint" : "/"
-      }
-    ]
-    recommendedDefaults = {
-      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    properties = {
-      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = []
-    self.assertEquals(res, expected)
-
-    # dfs.dir & hbase.rootdir crosscheck + root partition + hbase.rootdir == hbase.tmp.dir warnings
-    properties = {
-      'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = [
-      {
-        'config-name': 'hbase.rootdir',
-        'config-type': 'ams-hbase-site',
-        'level': 'WARN',
-        'message': 'It is not recommended to use root partition for hbase.rootdir',
-        'type': 'configuration'
-      },
-      {
-        'config-name': 'hbase.tmp.dir',
-        'config-type': 'ams-hbase-site',
-        'level': 'WARN',
-        'message': 'Consider not using / partition for storing metrics temporary data. '
-                   '/ partition is already used as hbase.rootdir to store metrics data',
-        'type': 'configuration'
-      },
-      {
-        'config-name': 'hbase.rootdir',
-        'config-type': 'ams-hbase-site',
-        'level': 'WARN',
-        'message': 'Consider not using / partition for storing metrics data. '
-                   '/ is already used by datanode to store HDFS data',
-        'type': 'configuration'
-      }
-    ]
-    self.assertEquals(res, expected)
-
-    # incorrect hbase.rootdir in distributed mode
-    properties = {
-      'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
-      'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
-      'hbase.cluster.distributed': 'false'
-    }
-    configurations['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
-    res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
-    expected = [
-      {
-        'config-name': 'hbase.rootdir',
-        'config-type': 'ams-hbase-site',
-        'level': 'WARN',
-        'message': 'In distributed mode hbase.rootdir should point to HDFS.',
-        'type': 'configuration'
-      },
-      {
-        'config-name': 'hbase.cluster.distributed',
-        'config-type': 'ams-hbase-site',
-        'level': 'ERROR',
-        'message': 'hbase.cluster.distributed property should be set to true for distributed mode',
-        'type': 'configuration'
-      }
-    ]
-    self.assertEquals(res, expected)
 
   def test_validateStormSiteConfigurations(self):
     configurations = {

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
index d6b572e..6f9e18e 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_stack_advisor.py
@@ -2300,517 +2300,6 @@ class TestHDP22StackAdvisor(TestCase):
     self.stackAdvisor.recommendMapReduce2Configurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations, expected)
 
-  def test_recommendAmsConfigurations(self):
-    configurations = {}
-    clusterData = {}
-
-    services = {
-      "services":  [ {
-        "StackServices": {
-          "service_name": "AMBARI_METRICS"
-        },
-        "components": [{
-          "StackServiceComponents": {
-            "component_name": "METRICS_COLLECTOR",
-            "hostnames": ["host1"]
-          }
-
-        }, {
-          "StackServiceComponents": {
-            "component_name": "METRICS_MONITOR",
-            "hostnames": ["host1"]
-          }
-
-        }]
-      }],
-      "configurations": []
-    }
-    hosts = {
-      "items": [{
-        "Hosts": {
-          "host_name": "host1",
-
-        }
-      }]
-    }
-
-    # 1-node cluster
-    expected = {
-      "ams-hbase-env": {
-        "properties": {
-          "hbase_master_xmn_size": "128",
-          "hbase_master_heapsize": "512",
-          "hbase_regionserver_heapsize": "512"
-        }
-      },
-      "ams-grafana-env": {
-        "properties" : {},
-        "property_attributes": {
-          "metrics_grafana_password": {
-            "visible": "false"
-          }
-        }
-      },
-      "ams-env": {
-        "properties": {
-          "metrics_collector_heapsize": "512",
-        }
-      },
-      "ams-hbase-site": {
-        "properties": {
-          "phoenix.coprocessor.maxMetaDataCacheSize": "20480000",
-          "hbase.regionserver.global.memstore.lowerLimit": "0.3",
-          "hbase.regionserver.global.memstore.upperLimit": "0.35",
-          "hbase.hregion.memstore.flush.size": "134217728",
-          "hfile.block.cache.size": "0.3",
-          "hbase.cluster.distributed": "false",
-          "hbase.rootdir": "file:///var/lib/ambari-metrics-collector/hbase",
-          "hbase.tmp.dir": "/var/lib/ambari-metrics-collector/hbase-tmp",
-          "hbase.zookeeper.property.clientPort": "61181",
-        }
-      },
-      "ams-site": {
-        "properties": {
-          "timeline.metrics.cluster.aggregate.splitpoints": "mem_buffered",
-          "timeline.metrics.host.aggregate.splitpoints": "mem_buffered",
-          "timeline.metrics.service.handler.thread.count": "20",
-          'timeline.metrics.service.webapp.address': '0.0.0.0:6188',
-          'timeline.metrics.service.watcher.disabled': 'false',
-          'timeline.metrics.cache.size': '100',
-          'timeline.metrics.cache.commit.interval': '10'
-        }
-      }
-    }
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # 200-nodes cluster, but still only 1 sink (METRICS_COLLECTOR)
-    for i in range(2, 201):
-      hosts['items'].extend([{
-        "Hosts": {
-          "host_name": "host" + str(i)
-          }
-      }])
-
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "AMBARI_METRICS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_COLLECTOR",
-              "hostnames": ["host1"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_MONITOR",
-              "hostnames": ["host" + str(i) for i in range(1, 201)]
-            }
-          }
-        ]
-      }
-    ]
-
-    expected["ams-site"]['properties']['timeline.metrics.cache.size'] = '500'
-    expected["ams-site"]['properties']['timeline.metrics.cache.commit.interval'] = '7'
-    expected["ams-hbase-env"]['properties']['hbase_master_heapsize'] = '2560'
-    expected["ams-hbase-env"]['properties']['hbase_master_xmn_size'] = '448'
-    expected["ams-env"]['properties']['metrics_collector_heapsize'] = '896'
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # 200 nodes, but with HDFS and YARN services installed on all nodes
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "HDFS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NAMENODE",
-              "hostnames": ["host1"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DATANODE",
-              "hostnames": ["host" + str(i) for i in range(1, 201)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "YARN"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "RESOURCEMANAGER",
-              "hostnames": ["host1"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "NODEMANAGER",
-              "hostnames": ["host" + str(i) for i in range(1, 201)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "AMBARI_METRICS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_COLLECTOR",
-              "hostnames": ["host1"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_MONITOR",
-              "hostnames": ["host" + str(i) for i in range(1, 201)]
-            }
-          }
-        ]
-      }
-
-    ]
-    expected["ams-site"]['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'dfs.FSNamesystem.FilesTotal,' \
-                                                                                        'dfs.datanode.WritesFromRemoteClient,' \
-                                                                                        'ipc.IPC.numCallsInReplicationQueue,' \
-                                                                                        'mapred.ShuffleMetrics.ShuffleOutputsFailed,' \
-                                                                                        'mem_buffered,' \
-                                                                                        'read_count,' \
-                                                                                        'regionserver.Server.percentFilesLocal,' \
-                                                                                        'rpcdetailed.rpcdetailed.RegisterNodeManagerNumOps,' \
-                                                                                        'sdisk_vdb_write_count'
-    expected["ams-site"]['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'mem_total'
-
-    expected["ams-site"]['properties']['timeline.metrics.cache.size'] = '600'
-    expected["ams-site"]['properties']['timeline.metrics.cache.commit.interval'] = '6'
-    expected["ams-hbase-env"]['properties']['hbase_master_heapsize'] = '6656'
-    expected["ams-hbase-env"]['properties']['hbase_master_xmn_size'] = '1088'
-    expected["ams-env"]['properties']['metrics_collector_heapsize'] = '2176'
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # Test splitpoints, AMS embedded mode
-    services['changed-configurations'] = [
-      {
-        "type": "ams-hbase-env",
-        "name": "hbase_master_heapsize",
-        "old_value": "1024"
-      }
-    ]
-
-    services['configurations'] = {
-      'core-site': {'properties': {}},
-      'ams-site': {'properties': {}},
-      'ams-hbase-site': {'properties': {}},
-      'ams-hbase-env': {'properties': {}}
-    }
-
-    # Embedded mode, 512m master heapsize, no splitpoints recommended
-    services["configurations"]['ams-hbase-env']['properties']['hbase_master_heapsize'] = '512'
-    services["configurations"]['ams-hbase-site']['properties']['hbase.regionserver.global.memstore.upperLimit'] = '0.4'
-    services["configurations"]['ams-hbase-site']['properties']['hbase.hregion.memstore.flush.size'] = '134217728'
-
-    expected['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-hbase-env']['properties']['hbase_master_heapsize'] = '512'
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # Embedded mode, 4096m master heapsize, some splitpoints recommended
-    services["configurations"]['ams-hbase-env']['properties']['hbase_master_heapsize'] = '4096'
-    expected['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'dfs.namenode.BlockReportAvgTime,' \
-                                                                                        'master.AssignmentManger.Assign_mean,' \
-                                                                                        'regionserver.Server.Append_median,' \
-                                                                                        'rpcdetailed.rpcdetailed.client.CheckAccessNumOps'
-    expected['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-hbase-env']['properties']['hbase_master_heapsize'] = '4096'
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-
-    # Embedded mode, 8192m master heapsize, more splitpoints recommended
-    services["configurations"]['ams-hbase-env']['properties']['hbase_master_heapsize'] = '8192'
-    expected['ams-hbase-env']['properties']['hbase_master_heapsize'] = '8192'
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(len(configurations['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'].split(',')), 13)
-    self.assertEquals(len(configurations['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'].split(',')), 2)
-
-    # Test splitpoints, AMS distributed mode
-    services['changed-configurations'] = [
-      {
-        "type": "ams-hbase-env",
-        "name": "hbase_regionserver_heapsize",
-        "old_value": "512"
-      }
-    ]
-    services["configurations"]['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
-    services["configurations"]["core-site"]["properties"]["fs.defaultFS"] = 'hdfs://host1:8020'
-    expected['ams-hbase-site']['properties']['hbase.cluster.distributed'] = 'true'
-    expected['ams-hbase-site']['properties']['hbase.rootdir'] = '/user/ams/hbase'
-    expected['ams-hbase-site']['properties']['hbase.zookeeper.property.clientPort'] = '2181'
-    expected['ams-hbase-env']['properties']['hbase_master_heapsize'] = '512'
-    expected['ams-hbase-site']['properties']['dfs.client.read.shortcircuit'] = 'true'
-
-    # Distributed mode, low memory, no splitpoints recommended
-    expected['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'mem_total'
-    expected['ams-hbase-env']['properties']['hbase_regionserver_heapsize'] = '6656'
-    expected["ams-hbase-env"]['properties']['hbase_master_xmn_size'] = '102'
-    expected['ams-hbase-env']['properties']['regionserver_xmn_size'] = '1024'
-    expected['ams-site']['properties']['timeline.metrics.service.watcher.disabled'] = 'true'
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
-
-    # Distributed mode, more memory, more splitpoints recommended
-    services["configurations"]['ams-hbase-env']['properties']['hbase_regionserver_heapsize'] = '8192'
-    expected['ams-hbase-env']['properties']['hbase_regionserver_heapsize'] = '8192'
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(len(configurations['ams-site']['properties']['timeline.metrics.host.aggregate.splitpoints'].split(',')), 13)
-    self.assertEquals(len(configurations['ams-site']['properties']['timeline.metrics.cluster.aggregate.splitpoints'].split(',')), 2)
-
-    # 2000-nodes cluster
-    for i in range(202, 2001):
-        hosts['items'].extend([{
-            "Hosts": {
-                "host_name": "host" + str(i)
-            }
-        }])
-
-    services['services'] = [
-        {
-            "StackServices": {
-                "service_name": "AMBARI_METRICS"
-            },
-            "components": [
-                {
-                    "StackServiceComponents": {
-                        "component_name": "METRICS_COLLECTOR",
-                        "hostnames": ["host1"]
-                    }
-                },
-                {
-                    "StackServiceComponents": {
-                        "component_name": "METRICS_MONITOR",
-                        "hostnames": ["host" + str(i) for i in range(1, 2001)]
-                    }
-                }
-            ]
-        }
-    ]
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations["ams-site"]['properties']['timeline.metrics.cache.size'], '700')
-    self.assertEquals(configurations["ams-site"]['properties']['timeline.metrics.cache.commit.interval'], '5')
-
-    # 500 Nodes with HDFS, YARN, HIVE, STORM, HBASE, KAFKA, AMS
-    node_count = 500
-    hosts = {
-      "items": []
-    }
-    for i in range(1, node_count):
-      hosts['items'].extend([{
-        "Hosts": {
-          "host_name": "host" + str(i)
-          }
-      }])
-
-    services['services'] = [
-      {
-        "StackServices": {
-          "service_name": "HDFS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NAMENODE",
-              "hostnames": ["host1"]
-            }
-          } ,
-          {
-            "StackServiceComponents": {
-              "component_name": "SECONDARY_NAMENODE",
-              "hostnames": ["host2"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "DATANODE",
-              "hostnames": ["host" + str(i) for i in range(6, node_count + 1)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "HBASE"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "HBASE_MASTER",
-              "hostnames": ["host3"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "HBASE_REGIONSERVER",
-              "hostnames": ["host" + str(i) for i in range(6, node_count + 1)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "YARN"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "RESOURCEMANAGER",
-              "hostnames": ["host4"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "NODEMANAGER",
-              "hostnames": ["host" + str(i) for i in range(6, node_count + 1)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "HIVE"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "HIVE_METASTORE",
-              "hostnames": ["host3"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "HIVE_SERVER",
-              "hostnames": ["host3"]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "STORM"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "NIMBUS",
-              "hostnames": ["host" + str(i) for i in range(1, 6)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "KAFKA"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "KAFKA_BROKER",
-              "hostnames": ["host" + str(i) for i in range(1, 6)]
-            }
-          }
-        ]
-      },
-      {
-        "StackServices": {
-          "service_name": "AMBARI_METRICS"
-        },
-        "components": [
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_COLLECTOR",
-              "hostnames": ["host6"]
-            }
-          },
-          {
-            "StackServiceComponents": {
-              "component_name": "METRICS_MONITOR",
-              "hostnames": ["host" + str(i) for i in range(6, node_count + 1)]
-            }
-          }
-        ]
-      }
-    ]
-
-    services['configurations'] = {
-      'core-site': {'properties': {}},
-      'ams-site': {'properties': {}},
-      'ams-hbase-site': {'properties': {}},
-      'ams-hbase-env': {'properties': {}}
-    }
-    services["configurations"]['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
-
-    expected['ams-hbase-site']['properties']['hbase.cluster.distributed'] = 'true'
-    expected['ams-hbase-site']['properties']['hbase.rootdir'] = '/user/ams/hbase'
-    expected['ams-hbase-site']['properties']['hbase.zookeeper.property.clientPort'] = '2181'
-
-    expected["ams-site"]['properties']['timeline.metrics.host.aggregate.splitpoints'] = 'default.General.active_calls_api_get_all_databases,' \
-                                                                                        'default.General.api_get_database_mean,' \
-                                                                                        'default.General.gc.PS-MarkSweep.count,' \
-                                                                                        'dfs.FsVolume.TotalDataFileIos,' \
-                                                                                        'disk_free,' \
-                                                                                        'jvm.JvmMetrics.MemHeapMaxM,' \
-                                                                                        'kafka.network.RequestMetrics.RemoteTimeMs.request.Metadata.75percentile,' \
-                                                                                        'kafka.network.RequestMetrics.ResponseQueueTimeMs.request.Update.Metadata.mean,' \
-                                                                                        'load_one,master.FileSystem.MetaHlogSplitTime_75th_percentile,' \
-                                                                                        'metricssystem.MetricsSystem.NumActiveSources,' \
-                                                                                        'regionserver.Server.Append_95th_percentile,' \
-                                                                                        'regionserver.Server.blockCacheEvictionCount,' \
-                                                                                        'rpc.rpc.client.SentBytes,' \
-                                                                                        'sdisk_vda1_write_bytes'
-    expected["ams-site"]['properties']['timeline.metrics.cluster.aggregate.splitpoints'] = 'ipc.IPC.authorizationSuccesses,' \
-                                                                                           'metricssystem.MetricsSystem.PublishNumOps'
-
-    expected["ams-site"]['properties']['timeline.metrics.cache.size'] = '700'
-    expected["ams-site"]['properties']['timeline.metrics.cache.commit.interval'] = '5'
-    expected["ams-site"]['properties']['timeline.metrics.service.resultset.fetchSize'] = '5000'
-    expected["ams-site"]['properties']['phoenix.query.maxGlobalMemoryPercentage'] = '30'
-
-    expected["ams-env"]['properties']['metrics_collector_heapsize'] = '7040'
-
-    expected["ams-hbase-env"]['properties']['hbase_master_heapsize'] = '512'
-    expected["ams-hbase-env"]['properties']['hbase_master_xmn_size'] = '102'
-
-    expected["ams-hbase-env"]['properties']['hbase_regionserver_heapsize'] = '21120'
-    expected["ams-hbase-env"]['properties']['regionserver_xmn_size'] = '3200'
-
-    expected["ams-hbase-site"]['properties']['phoenix.query.maxGlobalMemoryPercentage'] = '20'
-    expected['ams-hbase-site']['properties']['hbase.hregion.memstore.flush.size'] = '268435456'
-    expected['ams-hbase-site']['properties']['hbase.regionserver.handler.count'] = '60'
-    expected['ams-hbase-site']['properties']['hbase.regionserver.hlog.blocksize'] = '134217728'
-    expected['ams-hbase-site']['properties']['hbase.regionserver.maxlogs'] = '64'
-    expected['ams-hbase-site']['properties']['phoenix.coprocessor.maxMetaDataCacheSize'] = '40960000'
-
-    self.stackAdvisor.recommendAmsConfigurations(configurations, clusterData, services, hosts)
-    self.assertEquals(configurations, expected)
 
   def test_recommendHbaseConfigurations(self):
     servicesList = ["HBASE"]

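The validation tests deleted above all assert the same shape: every finding a stack advisor returns is a flat dict with five fixed keys. A representative item, mirroring the structure of the expectations in the removed hunks (values illustrative):

validation_item = {
    "config-name": "hbase.rootdir",    # the property being flagged
    "config-type": "ams-hbase-site",   # the config file it lives in
    "level": "WARN",                   # severity: "WARN" or "ERROR"
    "message": "It is not recommended to use root partition for hbase.rootdir",
    "type": "configuration",
}

Keeping that shape stable is what lets the tests compare whole lists of findings with a single assertEquals.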

[41/50] [abbrv] ambari git commit: AMBARI-22158. Ambari schema upgrade fails when upgrading ambari from 2.5.1.0 to 2.6.0.0 and using oracle as database (dlysnichenko)

Posted by jl...@apache.org.
AMBARI-22158. Ambari schema upgrade fails when upgrading ambari from 2.5.1.0 to 2.6.0.0 and using oracle as database (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f44c8669
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f44c8669
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f44c8669

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: f44c86690a64523aed04bfd76e5c38e194e731ba
Parents: b358ee2
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Fri Oct 6 17:02:13 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Fri Oct 6 17:03:10 2017 +0300

----------------------------------------------------------------------
 .../org/apache/ambari/server/upgrade/UpgradeCatalog260.java    | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f44c8669/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
index c31469e..866a501 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
@@ -144,11 +144,14 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
   public static final String CURRENT = "CURRENT";
   public static final String SELECTED = "1";
   public static final String VIEWURL_TABLE = "viewurl";
+  public static final String VIEWINSTANCE_TABLE = "viewinstance";
   public static final String PK_VIEWURL = "PK_viewurl";
   public static final String URL_ID_COLUMN = "url_id";
   public static final String STALE_POSTGRESS_VIEWURL_PKEY = "viewurl_pkey";
   public static final String USERS_TABLE = "users";
   public static final String STALE_POSTGRESS_USERS_LDAP_USER_KEY = "users_ldap_user_key";
+  public static final String SHORT_URL_COLUMN = "short_url";
+  public static final String FK_INSTANCE_URL_ID = "FK_instance_url_id";
 
 
   /**
@@ -205,8 +208,11 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
    * Adds the {@value #PK_VIEWURL} constraint.
    */
   private void addViewUrlPKConstraint() throws SQLException {
+    dbAccessor.dropFKConstraint(VIEWINSTANCE_TABLE, FK_INSTANCE_URL_ID);
     dbAccessor.dropPKConstraint(VIEWURL_TABLE, STALE_POSTGRESS_VIEWURL_PKEY);
     dbAccessor.addPKConstraint(VIEWURL_TABLE, PK_VIEWURL, URL_ID_COLUMN);
+    dbAccessor.addFKConstraint(VIEWINSTANCE_TABLE, FK_INSTANCE_URL_ID,
+        SHORT_URL_COLUMN, VIEWURL_TABLE, URL_ID_COLUMN, false);
   }
 
   /**

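The ordering in addViewUrlPKConstraint() above is the substance of the fix: Oracle raises ORA-02273 when asked to drop a primary key that a foreign key still references, so the viewinstance FK on short_url has to be detached before the stale viewurl primary key is replaced, then restored afterwards. A minimal Python sketch of that sequence; the DbAccessor stub below only prints the DDL it would run and is illustrative, not Ambari's actual (Java) DBAccessor API:

class DbAccessor(object):
    """Hypothetical stub that prints the DDL it would execute."""
    def drop_fk_constraint(self, table, name):
        print("ALTER TABLE %s DROP CONSTRAINT %s" % (table, name))
    def drop_pk_constraint(self, table, name):
        print("ALTER TABLE %s DROP CONSTRAINT %s" % (table, name))
    def add_pk_constraint(self, table, name, column):
        print("ALTER TABLE %s ADD CONSTRAINT %s PRIMARY KEY (%s)" % (table, name, column))
    def add_fk_constraint(self, table, name, column, ref_table, ref_column):
        print("ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)"
              % (table, name, column, ref_table, ref_column))

db = DbAccessor()
db.drop_fk_constraint("viewinstance", "FK_instance_url_id")  # detach the dependent FK first
db.drop_pk_constraint("viewurl", "viewurl_pkey")             # now the stale PK can go
db.add_pk_constraint("viewurl", "PK_viewurl", "url_id")      # rebuild the PK
db.add_fk_constraint("viewinstance", "FK_instance_url_id",   # restore the FK against it
                     "short_url", "viewurl", "url_id")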

[21/50] [abbrv] ambari git commit: AMBARI-22123 - Adding Components On Patched Clusters Can Result In Symlink Issues With conf Directories (jonathanhurley)

Posted by jl...@apache.org.
AMBARI-22123 - Adding Components On Patched Clusters Can Result In Symlink Issues With conf Directories (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/158bd656
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/158bd656
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/158bd656

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 158bd656dfe973d7aa487ae90735b315778c5463
Parents: 3e6aa87
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Oct 4 13:52:48 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Oct 4 13:52:48 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/conf_select.py          | 285 +++++++------------
 .../custom_actions/scripts/install_packages.py  |   6 +-
 .../scripts/shared_initialization.py            |   6 +-
 .../src/test/python/TestAmbariServer.py         |   4 +-
 ambari-server/src/test/python/TestMpacks.py     |  12 +-
 .../hooks/after-INSTALL/test_after_install.py   |  50 +---
 .../stacks/2.2/common/test_conf_select.py       |  13 +-
 .../HIVE/package/scripts/hive_client.py         |   2 -
 .../HIVE/package/scripts/hive_metastore.py      |   1 -
 .../HIVE/package/scripts/hive_server.py         |   2 +-
 .../package/scripts/hive_server_interactive.py  |   1 -
 .../HIVE/package/scripts/webhcat_server.py      |   2 -
 .../scripts/application_timeline_server.py      |   1 -
 .../YARN/package/scripts/historyserver.py       |   1 -
 .../YARN/package/scripts/mapreduce2_client.py   |   2 -
 .../YARN/package/scripts/nodemanager.py         |   1 -
 .../YARN/package/scripts/resourcemanager.py     |   1 -
 .../YARN/package/scripts/yarn_client.py         |   1 -
 18 files changed, 139 insertions(+), 252 deletions(-)
----------------------------------------------------------------------
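
For orientation before the conf_select.py rewrite below: the layout the commit converges on is /etc/<component>/conf -> <stack-root>/current/<component>-client/conf, with any pre-existing directory contents preserved in /etc/<component>/conf.backup. A quick, hypothetical way to check which state a host is in (component path illustrative):

import os

conf_dir = "/etc/hadoop/conf"  # illustrative; any managed component's conf path
if os.path.islink(conf_dir):
    print("%s -> %s" % (conf_dir, os.readlink(conf_dir)))  # converted: link in place
elif os.path.isdir(conf_dir):
    print("%s is still a real directory (pre-conversion)" % conf_dir)
else:
    print("%s is missing entirely" % conf_dir)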


http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index f330f39..c89e767 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -26,7 +26,6 @@ import subprocess
 import ambari_simplejson as json
 
 # Local Imports
-import stack_select
 from resource_management.core import shell
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.script.script import Script
@@ -43,9 +42,6 @@ from resource_management.core.shell import as_sudo
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.libraries.functions import StackFeature
 
-DIRECTORY_TYPE_BACKUP = "backup"
-DIRECTORY_TYPE_CURRENT = "current"
-
 def _get_cmd(command, package, version):
   conf_selector_path = stack_tools.get_stack_tool_path(stack_tools.CONF_SELECTOR_NAME)
   return ('ambari-python-wrap', conf_selector_path, command, '--package', package, '--stack-version', version, '--conf-version', '0')
@@ -98,11 +94,17 @@ def create(stack_name, package, version, dry_run = False):
   :param dry_run: False to create the versioned config directory, True to only return what would be created
   :return List of directories created
   """
-  Logger.info("Checking if need to create versioned conf dir /etc/{0}/{1}/0".format(package, version))
   if not _valid(stack_name, package, version):
-    Logger.info("Will not create it since parameters are not valid.")
+    Logger.info("Unable to create versioned configuration directories since the parameters supplied do not support it")
     return []
 
+  # clarify the logging of what we're doing ...
+  if dry_run:
+    Logger.info(
+      "Checking to see which directories will be created for {0} on version {1}".format(package, version))
+  else:
+    Logger.info("Creating /etc/{0}/{1}/0 if it does not exist".format(package, version))
+
   command = "dry-run-create" if dry_run else "create-conf-dir"
 
   code, stdout, stderr = shell.call(_get_cmd(command, package, version), logoutput=False, quiet=False, sudo=True, stderr = subprocess.PIPE)
@@ -129,17 +131,13 @@ def create(stack_name, package, version, dry_run = False):
   return created_directories
 
 
-def select(stack_name, package, version, try_create=True, ignore_errors=False):
+def select(stack_name, package, version, ignore_errors=False):
   """
-  Selects a config version for the specified package. If this detects that
-  the stack supports configuration versioning but /etc/<component>/conf is a
-  directory, then it will attempt to bootstrap the conf.backup directory and change
-  /etc/<component>/conf into a symlink.
+  Selects a config version for the specified package.
 
   :param stack_name: the name of the stack
   :param package: the name of the package, as-used by <conf-selector-tool>
   :param version: the version number to create
-  :param try_create: optional argument to attempt to create the directory before setting it
   :param ignore_errors: optional argument to ignore any error and simply log a warning
   """
   try:
@@ -147,67 +145,8 @@ def select(stack_name, package, version, try_create=True, ignore_errors=False):
     if not _valid(stack_name, package, version):
       return
 
-    if try_create:
-      create(stack_name, package, version)
-
+    create(stack_name, package, version)
     shell.checked_call(_get_cmd("set-conf-dir", package, version), logoutput=False, quiet=False, sudo=True)
-
-    # for consistency sake, we must ensure that the /etc/<component>/conf symlink exists and
-    # points to <stack-root>/current/<component>/conf - this is because some people still prefer to
-    # use /etc/<component>/conf even though <stack-root> is the "future"
-    package_dirs = get_package_dirs()
-    if package in package_dirs:
-      Logger.info("Ensuring that {0} has the correct symlink structure".format(package))
-
-      directory_list = package_dirs[package]
-      for directory_structure in directory_list:
-        conf_dir = directory_structure["conf_dir"]
-        current_dir = directory_structure["current_dir"]
-
-        # if /etc/<component>/conf is missing or is not a symlink
-        if not os.path.islink(conf_dir):
-          # if /etc/<component>/conf is not a link and it exists, convert it to a symlink
-          if os.path.exists(conf_dir):
-            parent_directory = os.path.dirname(conf_dir)
-            conf_backup_dir = os.path.join(parent_directory, "conf.backup")
-
-            # create conf.backup and copy files to it (if it doesn't exist)
-            Execute(("cp", "-R", "-p", conf_dir, conf_backup_dir),
-              not_if = format("test -e {conf_backup_dir}"), sudo = True)
-
-            # delete the old /etc/<component>/conf directory and link to the backup
-            Directory(conf_dir, action="delete")
-            Link(conf_dir, to = conf_backup_dir)
-          else:
-            # missing entirely
-            # /etc/<component>/conf -> <stack-root>/current/<component>/conf
-            if package in ["atlas", ]:
-              #HACK for Atlas
-              '''
-              In the case of Atlas, the Hive RPM installs /usr/$stack/$version/atlas with some partial packages that
-              contain Hive hooks, while the Atlas RPM is responsible for installing the full content.
-
-              If the user does not have Atlas currently installed on their stack, then /usr/$stack/current/atlas-client
-              will be a broken symlink, and we should not create the
-              symlink /etc/atlas/conf -> /usr/$stack/current/atlas-client/conf .
-              If we mistakenly create this symlink, then when the user performs an EU/RU and then adds the Atlas service,
-              the Atlas RPM will not be able to copy its artifacts into the /etc/atlas/conf directory, which in turn
-              prevents Ambari from copying those unmanaged contents into /etc/atlas/$version/0
-              '''
-              component_list = default("/localComponents", [])
-              if "ATLAS_SERVER" in component_list or "ATLAS_CLIENT" in component_list:
-                Logger.info("Atlas is installed on this host.")
-                parent_dir = os.path.dirname(current_dir)
-                if os.path.exists(parent_dir):
-                  Link(conf_dir, to=current_dir)
-                else:
-                  Logger.info("Will not create symlink from {0} to {1} because the destination's parent dir does not exist.".format(conf_dir, current_dir))
-              else:
-                Logger.info("Will not create symlink from {0} to {1} because Atlas is not installed on this host.".format(conf_dir, current_dir))
-            else:
-              # Normal path for other packages
-              Link(conf_dir, to=current_dir)
-
   except Exception, exception:
     if ignore_errors is True:
       Logger.warning("Could not select the directory for package {0}. Error: {1}".format(package,
@@ -242,145 +181,117 @@ def get_hadoop_conf_dir():
   return hadoop_conf_dir
 
 
-def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_links=True,
-    link_to=DIRECTORY_TYPE_CURRENT):
+def convert_conf_directories_to_symlinks(package, version, dirs):
   """
-  Assumes HDP 2.3+, moves around directories and creates the conf symlink for the given package.
-  If the package does not exist, then no work is performed.
+  Reverses the symlinks created by the package installer and invokes the conf-select tool to
+  create versioned configuration directories for the given package. If the package does not exist,
+  then no work is performed.
 
-  - Creates a /etc/<component>/conf.backup directory
-  - Copies all configs from /etc/<component>/conf to conf.backup
-  - Removes /etc/<component>/conf
   - Creates /etc/<component>/<version>/0 via <conf-selector-tool>
+  - Creates a /etc/<component>/conf.backup directory, if needed
+  - Copies all configs from /etc/<component>/conf to conf.backup, if needed
+  - Removes /etc/<component>/conf, if needed
   - <stack-root>/current/<component>-client/conf -> /etc/<component>/<version>/0 via <conf-selector-tool>
-  - Links /etc/<component>/conf to <something> depending on function paramter
-  -- /etc/<component>/conf -> <stack-root>/current/[component]-client/conf (usually)
-  -- /etc/<component>/conf -> /etc/<component>/conf.backup (only when supporting < HDP 2.3)
+  - Links /etc/<component>/conf -> <stack-root>/current/[component]-client/conf
 
   :param package: the package to create symlinks for (zookeeper, falcon, etc)
   :param version: the version number to use with <conf-selector-tool> (2.3.0.0-1234)
   :param dirs: the directories associated with the package (from get_package_dirs())
-  :param skip_existing_links: True to not do any work if already a symlink
-  :param link_to: link to "current" or "backup"
   """
-  # lack of enums makes this possible - we need to know what to link to
-  if link_to not in [DIRECTORY_TYPE_CURRENT, DIRECTORY_TYPE_BACKUP]:
-    raise Fail("Unsupported 'link_to' argument. Could not link package {0}".format(package))
-
+  # if the conf_dir doesn't exist, then that indicates that the package's service is not installed
+  # on this host and nothing should be done with conf symlinks
   stack_name = Script.get_stack_name()
-  bad_dirs = []
-  for dir_def in dirs:
-    if not os.path.exists(dir_def['conf_dir']):
-      bad_dirs.append(dir_def['conf_dir'])
-
-  if len(bad_dirs) > 0:
-    Logger.info("Skipping {0} as it does not exist.".format(",".join(bad_dirs)))
-    return
-
-  # existing links should be skipped since we assume there's no work to do
-  # they should be checked against the correct target though
-  if skip_existing_links:
-    bad_dirs = []
-    for dir_def in dirs:
-      # check if conf is a link already
-      old_conf = dir_def['conf_dir']
-      if os.path.islink(old_conf):
-        # it's already a link; make sure it's a link to where we want it
-        if link_to == DIRECTORY_TYPE_BACKUP:
-          target_conf_dir = _get_backup_conf_directory(old_conf)
-        else:
-          target_conf_dir = dir_def['current_dir']
-
-        # the link isn't to the right spot; re-link it
-        if os.readlink(old_conf) != target_conf_dir:
-          Logger.info("Re-linking symlink {0} to {1}".format(old_conf, target_conf_dir))
-
-          Link(old_conf, action = "delete")
-          Link(old_conf, to = target_conf_dir)
-        else:
-          Logger.info("{0} is already linked to {1}".format(old_conf, os.path.realpath(old_conf)))
-
-        bad_dirs.append(old_conf)
+  for directory_struct in dirs:
+    if not os.path.exists(directory_struct['conf_dir']):
+      Logger.info("Skipping the conf-select tool on {0} since {1} does not exist.".format(
+        package, directory_struct['conf_dir']))
 
-  if len(bad_dirs) > 0:
-    return
-
-  # make backup dir and copy everything in case configure() was called after install()
-  for dir_def in dirs:
-    old_conf = dir_def['conf_dir']
-    backup_dir = _get_backup_conf_directory(old_conf)
-    Logger.info("Backing up {0} to {1} if destination doesn't exist already.".format(old_conf, backup_dir))
-    Execute(("cp", "-R", "-p", unicode(old_conf), unicode(backup_dir)),
-      not_if = format("test -e {backup_dir}"), sudo = True)
-
-  # we're already in the HDP stack
-  # Create the versioned /etc/[component]/[version]/0 folder.
-  # The component must be installed on the host.
-  versioned_confs = create(stack_name, package, version, dry_run = True)
+      return
 
-  Logger.info("Package {0} will have new conf directories: {1}".format(package, ", ".join(versioned_confs)))
+  # determine which directories would be created, if any are needed
+  dry_run_directory = create(stack_name, package, version, dry_run = True)
 
   need_dirs = []
-  for d in versioned_confs:
+  for d in dry_run_directory:
     if not os.path.exists(d):
       need_dirs.append(d)
 
+  # log that we'll actually be creating some directories soon
   if len(need_dirs) > 0:
-    create(stack_name, package, version)
+    Logger.info("Package {0} will have the following new configuration directories created: {1}".format(
+      package, ", ".join(dry_run_directory)))
 
-    # find the matching definition and back it up (not the most efficient way) ONLY if there is more than one directory
-    if len(dirs) > 1:
-      for need_dir in need_dirs:
-        for dir_def in dirs:
-          if 'prefix' in dir_def and need_dir.startswith(dir_def['prefix']):
-            old_conf = dir_def['conf_dir']
-            versioned_conf = need_dir
-            Execute(as_sudo(["cp", "-R", "-p", os.path.join(old_conf, "*"), versioned_conf], auto_escape=False),
-              only_if = format("ls -d {old_conf}/*"))
-    elif 1 == len(dirs) and 1 == len(need_dirs):
-      old_conf = dirs[0]['conf_dir']
-      versioned_conf = need_dirs[0]
-      Execute(as_sudo(["cp", "-R", "-p", os.path.join(old_conf, "*"), versioned_conf], auto_escape=False),
-        only_if = format("ls -d {old_conf}/*"))
+  # Create the versioned /etc/[component]/[version]/0 folder (using create-conf-dir) and then
+  # set it for the installed component:
+  # - Creates /etc/<component>/<version>/0
+  # - Links <stack-root>/<version>/<component>/conf -> /etc/<component>/<version>/0
+  select(stack_name, package, version, ignore_errors = True)
 
+  # check every existing link to see if it's a link and if it's pointed to the right spot
+  for directory_struct in dirs:
+    try:
+      # check if conf is a link already
+      old_conf = directory_struct['conf_dir']
+      current_dir = directory_struct['current_dir']
+      if os.path.islink(old_conf):
+        # it's already a link; make sure it's a link to where we want it
+        if os.readlink(old_conf) != current_dir:
+          # the link isn't to the right spot; re-link it
+          Logger.info("Re-linking symlink {0} to {1}".format(old_conf, current_dir))
+          Link(old_conf, action = "delete")
+          Link(old_conf, to = current_dir)
+        else:
+          Logger.info("{0} is already linked to {1}".format(old_conf, current_dir))
+      elif os.path.isdir(old_conf):
+        # the /etc/<component>/conf directory is not a link, so turn it into one
+        Logger.info("{0} is a directory - it must be converted into a symlink".format(old_conf))
 
-  # <stack-root>/current/[component] is already set to the correct version, e.g., <stack-root>/[version]/[component]
+        backup_dir = _get_backup_conf_directory(old_conf)
+        Logger.info("Backing up {0} to {1} if destination doesn't exist already.".format(old_conf, backup_dir))
+        Execute(("cp", "-R", "-p", old_conf, backup_dir),
+          not_if = format("test -e {backup_dir}"), sudo = True)
 
-  select(stack_name, package, version, ignore_errors = True)
+        # delete the old /etc/<component>/conf directory now that it's been backed up
+        Directory(old_conf, action = "delete")
 
-  # Symlink /etc/[component]/conf to /etc/[component]/conf.backup
-  try:
-    # No more references to /etc/[component]/conf
-    for dir_def in dirs:
-      # E.g., /etc/[component]/conf
-      new_symlink = dir_def['conf_dir']
-
-      # Delete the existing directory/link so that linking will work
-      if not os.path.islink(new_symlink):
-        Directory(new_symlink, action = "delete")
+        # link /etc/[component]/conf -> <stack-root>/current/[component]-client/conf
+        Link(old_conf, to = current_dir)
       else:
-        Link(new_symlink, action = "delete")
-
-      old_conf = dir_def['conf_dir']
-      backup_dir = _get_backup_conf_directory(old_conf)
-      # link /etc/[component]/conf -> /etc/[component]/conf.backup
-      # or
-      # link /etc/[component]/conf -> <stack-root>/current/[component]-client/conf
-      if link_to == DIRECTORY_TYPE_BACKUP:
-        Link(new_symlink, to=backup_dir)
-      else:
-        Link(new_symlink, to=dir_def['current_dir'])
-
-        #HACK
+        # missing entirely
+        # /etc/<component>/conf -> <stack-root>/current/<component>/conf
         if package in ["atlas", ]:
-          Logger.info("Seeding the new conf symlink {0} from the old backup directory {1} in case any "
-                      "unmanaged artifacts are needed.".format(new_symlink, backup_dir))
-          # If /etc/[component]/conf.backup exists, then copy any artifacts not managed by Ambari to the new symlink target
-          # Be careful not to clobber any existing files.
-          Execute(as_sudo(["cp", "-R", "--no-clobber", os.path.join(backup_dir, "*"), new_symlink], auto_escape=False),
-                  only_if=format("test -e {new_symlink}"))
-  except Exception, e:
-    Logger.warning("Could not change symlink for package {0} to point to {1} directory. Error: {2}".format(package, link_to, e))
+          # HACK for Atlas
+          '''
+          In the case of Atlas, the Hive RPM installs /usr/$stack/$version/atlas with some partial packages that
+          contain Hive hooks, while the Atlas RPM is responsible for installing the full content.
+    
+          If the user does not have Atlas currently installed on their stack, then /usr/$stack/current/atlas-client
+          will be a broken symlink, and we should not create the
+          symlink /etc/atlas/conf -> /usr/$stack/current/atlas-client/conf .
+          If we mistakenly create this symlink, then when the user performs an EU/RU and then adds the Atlas service,
+          the Atlas RPM will not be able to copy its artifacts into the /etc/atlas/conf directory, which in turn
+          prevents Ambari from copying those unmanaged contents into /etc/atlas/$version/0
+          '''
+          component_list = default("/localComponents", [])
+          if "ATLAS_SERVER" in component_list or "ATLAS_CLIENT" in component_list:
+            Logger.info("Atlas is installed on this host.")
+            parent_dir = os.path.dirname(current_dir)
+            if os.path.exists(parent_dir):
+              Link(old_conf, to = current_dir)
+            else:
+              Logger.info(
+                "Will not create symlink from {0} to {1} because the destination's parent dir does not exist.".format(
+                  old_conf, current_dir))
+          else:
+            Logger.info(
+            "Will not create symlink from {0} to {1} because Atlas is not installed on this host.".format(
+              old_conf, current_dir))
+        else:
+          # Normal path for other packages
+          Link(old_conf, to = current_dir)
+
+    except Exception, e:
+      Logger.warning("Could not change symlink for package {0} to point to current directory. Error: {1}".format(package, e))
 
 
 def _seed_new_configuration_directories(package, created_directories):
@@ -460,4 +371,4 @@ def _get_backup_conf_directory(old_conf):
   """
   old_parent = os.path.abspath(os.path.join(old_conf, os.pardir))
   backup_dir = os.path.join(old_parent, "conf.backup")
-  return backup_dir
+  return backup_dir
\ No newline at end of file
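
The guard above reduces to a general rule: never create a symlink whose target's parent directory is absent, or the link will dangle. A standalone sketch of that check using plain os calls (illustrative paths; not Ambari's Link/Directory resources):

import os

def safe_link(link_path, target_dir):
    # Refuse to create a link that would dangle: the target's parent
    # directory must already exist on disk.
    parent_dir = os.path.dirname(target_dir)
    if not os.path.exists(parent_dir):
        print("skipping %s -> %s: %s does not exist" % (link_path, target_dir, parent_dir))
        return False
    # Replace any stale link before re-linking.
    if os.path.islink(link_path):
        os.unlink(link_path)
    os.symlink(target_dir, link_path)
    return True

# e.g. safe_link("/etc/atlas/conf", "/usr/hdp/current/atlas-client/conf")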

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
index dcf3544..c5e4ae7 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
@@ -146,10 +146,10 @@ class InstallPackages(Script):
 
     # if installing a version of HDP that needs some symlink love, then create them
     if is_package_install_successful and 'actual_version' in self.structured_output:
-      self._create_config_links_if_necessary(stack_id, self.structured_output['actual_version'])
+      self._relink_configurations_with_conf_select(stack_id, self.structured_output['actual_version'])
 
 
-  def _create_config_links_if_necessary(self, stack_id, stack_version):
+  def _relink_configurations_with_conf_select(self, stack_id, stack_version):
     """
     Sets up the required structure for /etc/<component>/conf symlinks and <stack-root>/current
     configuration symlinks IFF the current stack is < HDP 2.3+ and the new stack is >= HDP 2.3
@@ -177,7 +177,7 @@ class InstallPackages(Script):
       Link("/usr/bin/conf-select", to="/usr/bin/hdfconf-select")
 
     for package_name, directories in conf_select.get_package_dirs().iteritems():
-      conf_select.select(self.stack_name, package_name, stack_version, ignore_errors = True)
+      conf_select.convert_conf_directories_to_symlinks(package_name, stack_version, directories)
 
   def compute_actual_version(self):
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
index 67c3ba8..1a4b074 100644
--- a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
@@ -116,7 +116,7 @@ def load_version(struct_out_file):
 
 def link_configs(struct_out_file):
   """
-  Links configs, only on a fresh install of HDP-2.3 and higher
+  Use the conf_select module to link configuration directories correctly.
   """
   import params
 
@@ -128,5 +128,5 @@ def link_configs(struct_out_file):
 
   # On parallel command execution this should be executed by a single process at a time.
   with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for k, v in conf_select.get_package_dirs().iteritems():
-      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
+    for package_name, directories in conf_select.get_package_dirs().iteritems():
+      conf_select.convert_conf_directories_to_symlinks(package_name, json_version, directories)
\ No newline at end of file
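
The FcntlBasedProcessLock wrapping this loop serializes relinking across concurrently running agent commands. A rough sketch of what such a lock does under the hood, assuming a plain fcntl.flock on a lock file (the real class additionally supports the enabled/skip_fcntl_failures switches visible in the call above):

import fcntl

class SimpleProcessLock(object):
    """Cross-process mutual exclusion via an advisory flock on a file."""
    def __init__(self, lock_file_path):
        self.lock_file_path = lock_file_path
        self.lock_file = None

    def __enter__(self):
        self.lock_file = open(self.lock_file_path, "a")
        fcntl.flock(self.lock_file, fcntl.LOCK_EX)  # blocks until acquired
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        fcntl.flock(self.lock_file, fcntl.LOCK_UN)
        self.lock_file.close()

# with SimpleProcessLock("/var/run/ambari-agent/link_configs.lock"):
#     ...  # only one process relinks configurations at a time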

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/ambari-server/src/test/python/TestAmbariServer.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestAmbariServer.py b/ambari-server/src/test/python/TestAmbariServer.py
index 1c4ebaf..d064b00 100644
--- a/ambari-server/src/test/python/TestAmbariServer.py
+++ b/ambari-server/src/test/python/TestAmbariServer.py
@@ -3693,6 +3693,7 @@ class TestAmbariServer(TestCase):
   @patch("ambari_server.dbConfiguration_linux.LinuxDBMSConfig.ensure_jdbc_driver_installed")
   @patch("ambari_server.dbConfiguration_linux.get_YN_input")
   @patch("ambari_server.serverSetup.update_properties")
+  @patch("ambari_server.dbConfiguration.get_ambari_properties")
   @patch("ambari_server.dbConfiguration_linux.get_ambari_properties")
   @patch("ambari_server.dbConfiguration_linux.store_password_file")
   @patch("ambari_server.dbConfiguration_linux.run_os_command")
@@ -3724,7 +3725,7 @@ class TestAmbariServer(TestCase):
                  get_YN_input_mock, gvsi_mock, gvsi_1_mock,
                  read_password_mock, verify_setup_allowed_method, is_jdbc_user_changed_mock, check_postgre_up_mock,
                  configure_postgres_mock, run_os_command_1_mock,
-                 store_password_file_mock, get_ambari_properties_1_mock, update_properties_mock,
+                 store_password_file_mock, get_ambari_properties_1_mock, get_ambari_properties_2_mock, update_properties_mock,
                  get_YN_input_1_mock, ensure_jdbc_driver_installed_mock,
                  remove_file_mock, isfile_mock, exists_mock,
                  run_os_command_mock, get_pw_nam_mock):
@@ -3768,6 +3769,7 @@ class TestAmbariServer(TestCase):
     read_password_mock.return_value = "bigdata2"
     get_ambari_properties_mock.return_value = properties
     get_ambari_properties_1_mock.return_value = properties
+    get_ambari_properties_2_mock.return_value = properties
     store_password_file_mock.return_value = "encrypted_bigdata2"
     ensure_jdbc_driver_installed_mock.return_value = True
     check_postgre_up_mock.return_value = (PGConfig.PG_STATUS_RUNNING, 0, "", "")
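
Note the mechanics of this change: every new @patch decorator must be matched by a new positional mock parameter, and mock injects them bottom-up, so the decorator closest to the function becomes the first argument (which is why get_ambari_properties_2_mock lands after get_ambari_properties_1_mock above). A tiny illustration:

from mock.mock import patch

@patch("os.path.exists")   # outermost decorator -> last mock parameter
@patch("os.path.isfile")   # innermost decorator -> first mock parameter
def check(isfile_mock, exists_mock):
    isfile_mock.return_value = True
    exists_mock.return_value = False
    import os.path
    assert os.path.isfile("anything") is True
    assert os.path.exists("anything") is False

check()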

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/ambari-server/src/test/python/TestMpacks.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestMpacks.py b/ambari-server/src/test/python/TestMpacks.py
index 98de76c..b290665 100644
--- a/ambari-server/src/test/python/TestMpacks.py
+++ b/ambari-server/src/test/python/TestMpacks.py
@@ -260,6 +260,7 @@ class TestMpacks(TestCase):
   @patch("os.path.exists")
   @patch("shutil.move")
   @patch("os.mkdir")
+  @patch("ambari_server.setupMpacks.read_ambari_user")
   @patch("ambari_server.setupMpacks.create_symlink")
   @patch("ambari_server.setupMpacks.get_ambari_version")
   @patch("ambari_server.setupMpacks.get_ambari_properties")
@@ -272,7 +273,7 @@ class TestMpacks(TestCase):
   @patch("ambari_server.setupMpacks.set_file_permissions")
   def test_install_stack_mpack(self, set_file_permissions_mock, validate_purge_mock, run_os_command_mock, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
                                      add_replay_log_mock, get_ambari_properties_mock, get_ambari_version_mock,
-                                     create_symlink_mock, os_mkdir_mock, shutil_move_mock, os_path_exists_mock):
+                                     create_symlink_mock, read_ambari_user_mock, os_mkdir_mock, shutil_move_mock, os_path_exists_mock):
     options = self._create_empty_options_mock()
     options.mpack_path = "/path/to/mystack.tar.gz"
     options.purge = True
@@ -409,6 +410,7 @@ class TestMpacks(TestCase):
   @patch("os.path.exists")
   @patch("shutil.move")
   @patch("os.mkdir")
+  @patch("ambari_server.setupMpacks.read_ambari_user")
   @patch("ambari_server.setupMpacks.create_symlink")
   @patch("ambari_server.setupMpacks.get_ambari_version")
   @patch("ambari_server.setupMpacks.get_ambari_properties")
@@ -420,7 +422,7 @@ class TestMpacks(TestCase):
 
   def test_install_extension_mpack(self, set_file_permissions_mock, download_mpack_mock, expand_mpack_mock, add_replay_log_mock,
       purge_stacks_and_mpacks_mock, get_ambari_properties_mock, get_ambari_version_mock,
-      create_symlink_mock, os_mkdir_mock, shutil_move_mock, os_path_exists_mock):
+      create_symlink_mock, read_ambari_user_mock, os_mkdir_mock, shutil_move_mock, os_path_exists_mock):
     options = self._create_empty_options_mock()
     options.mpack_path = "/path/to/myextension.tar.gz"
     options.purge = False
@@ -486,6 +488,7 @@ class TestMpacks(TestCase):
   @patch("os.symlink")
   @patch("shutil.move")
   @patch("os.mkdir")
+  @patch("ambari_server.setupMpacks.read_ambari_user")
   @patch("ambari_server.setupMpacks.create_symlink")
   @patch("ambari_server.setupMpacks.get_ambari_version")
   @patch("ambari_server.setupMpacks.get_ambari_properties")
@@ -496,7 +499,7 @@ class TestMpacks(TestCase):
   @patch("ambari_server.setupMpacks.set_file_permissions")
   def test_install_addon_service_mpack(self, set_file_permissions_mock, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
                                        add_replay_log_mock, get_ambari_properties_mock, get_ambari_version_mock,
-                                       create_symlink_mock, os_mkdir_mock, shutil_move_mock,os_symlink_mock,
+                                       create_symlink_mock, read_ambari_user_mock, os_mkdir_mock, shutil_move_mock,os_symlink_mock,
                                        os_path_isdir_mock, os_path_exists_mock ):
     options = self._create_empty_options_mock()
     options.mpack_path = "/path/to/myservice.tar.gz"
@@ -575,6 +578,7 @@ class TestMpacks(TestCase):
   @patch("os.path.exists")
   @patch("shutil.move")
   @patch("os.mkdir")
+  @patch("ambari_server.setupMpacks.read_ambari_user")
   @patch("ambari_server.setupMpacks.create_symlink")
   @patch("ambari_server.setupMpacks.get_ambari_version")
   @patch("ambari_server.setupMpacks.get_ambari_properties")
@@ -588,7 +592,7 @@ class TestMpacks(TestCase):
 
   def test_upgrade_stack_mpack(self, set_file_permissions_mock, run_os_command_mock, download_mpack_mock, expand_mpack_mock, purge_stacks_and_mpacks_mock,
                                _uninstall_mpack_mock, add_replay_log_mock, get_ambari_properties_mock,
-                               get_ambari_version_mock, create_symlink_mock, os_mkdir_mock, shutil_move_mock,
+                               get_ambari_version_mock, create_symlink_mock, read_ambari_user_mock, os_mkdir_mock, shutil_move_mock,
                                os_path_exists_mock, create_symlink_using_path_mock):
     options = self._create_empty_options_mock()
     options.mpack_path = "/path/to/mystack-1.0.0.1.tar.gz"

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
index d792192..19c81a8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
@@ -19,6 +19,7 @@ limitations under the License.
 '''
 
 import json
+
 from mock.mock import MagicMock, patch
 from stacks.utils.RMFTestCase import *
 from resource_management.core.logger import Logger
@@ -63,7 +64,7 @@ class TestHookAfterInstall(RMFTestCase):
                               create_parents = True)
     self.assertNoMoreResources()
 
-
+  @patch("os.path.isdir", new = MagicMock(return_value = True))
   @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1234"))
   @patch("resource_management.libraries.functions.conf_select.create")
   @patch("resource_management.libraries.functions.conf_select.select")
@@ -115,25 +116,17 @@ class TestHookAfterInstall(RMFTestCase):
       for dir_def in dir_defs:
         conf_dir = dir_def['conf_dir']
         conf_backup_dir = conf_dir + ".backup"
+        current_dir = dir_def['current_dir']
         self.assertResourceCalled('Execute', ('cp', '-R', '-p', conf_dir, conf_backup_dir),
             not_if = 'test -e ' + conf_backup_dir,
             sudo = True,)
 
-      for dir_def in dir_defs:
-        conf_dir = dir_def['conf_dir']
-        current_dir = dir_def['current_dir']
-        self.assertResourceCalled('Directory', conf_dir,
-            action = ['delete'],)
-        self.assertResourceCalled('Link', conf_dir,
-            to = current_dir,)
-
-      #HACK for Atlas
-      if package in ["atlas", ]:
-        self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cp -R --no-clobber /etc/atlas/conf.backup/* /etc/atlas/conf',
-                                  only_if = 'test -e ' + "/etc/atlas/conf")
+        self.assertResourceCalled('Directory', conf_dir, action = ['delete'],)
+        self.assertResourceCalled('Link', conf_dir, to = current_dir,)
 
     self.assertNoMoreResources()
 
+  @patch("os.path.isdir", new = MagicMock(return_value = True))
   @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1234"))
   @patch("resource_management.libraries.functions.conf_select.create")
   @patch("resource_management.libraries.functions.conf_select.select")
@@ -191,22 +184,13 @@ class TestHookAfterInstall(RMFTestCase):
       for dir_def in dir_defs:
         conf_dir = dir_def['conf_dir']
         conf_backup_dir = conf_dir + ".backup"
+        current_dir = dir_def['current_dir']
         self.assertResourceCalled('Execute', ('cp', '-R', '-p', conf_dir, conf_backup_dir),
             not_if = 'test -e ' + conf_backup_dir,
             sudo = True,)
 
-      for dir_def in dir_defs:
-        conf_dir = dir_def['conf_dir']
-        current_dir = dir_def['current_dir']
-        self.assertResourceCalled('Directory', conf_dir,
-            action = ['delete'],)
-        self.assertResourceCalled('Link', conf_dir,
-            to = current_dir,)
-
-      #HACK for Atlas
-      if package in ["atlas", ]:
-        self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cp -R --no-clobber /etc/atlas/conf.backup/* /etc/atlas/conf',
-                                  only_if = 'test -e ' + "/etc/atlas/conf")
+        self.assertResourceCalled('Directory', conf_dir, action = ['delete'],)
+        self.assertResourceCalled('Link', conf_dir, to = current_dir,)
 
     self.assertNoMoreResources()
 
@@ -252,6 +236,7 @@ class TestHookAfterInstall(RMFTestCase):
     self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', '2.3.0.0-1234'),
       sudo = True)
 
+  @patch("os.path.isdir", new = MagicMock(return_value = True))
   @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1234"))
   @patch("resource_management.libraries.functions.conf_select.create")
   @patch("resource_management.libraries.functions.conf_select.select")
@@ -302,22 +287,13 @@ class TestHookAfterInstall(RMFTestCase):
       for dir_def in dir_defs:
         conf_dir = dir_def['conf_dir']
         conf_backup_dir = conf_dir + ".backup"
+        current_dir = dir_def['current_dir']
         self.assertResourceCalled('Execute', ('cp', '-R', '-p', conf_dir, conf_backup_dir),
             not_if = 'test -e ' + conf_backup_dir,
             sudo = True,)
 
-      for dir_def in dir_defs:
-        conf_dir = dir_def['conf_dir']
-        current_dir = dir_def['current_dir']
-        self.assertResourceCalled('Directory', conf_dir,
-            action = ['delete'],)
-        self.assertResourceCalled('Link', conf_dir,
-            to = current_dir,)
-
-      #HACK for Atlas
-      if package in ["atlas", ]:
-        self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cp -R --no-clobber /etc/atlas/conf.backup/* /etc/atlas/conf',
-                                  only_if = 'test -e ' + "/etc/atlas/conf")
+        self.assertResourceCalled('Directory', conf_dir, action = ['delete'],)
+        self.assertResourceCalled('Link', conf_dir, to = current_dir,)
 
     self.assertNoMoreResources()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py b/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py
index 2eeec46..92dd634 100644
--- a/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py
+++ b/ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py
@@ -100,12 +100,13 @@ class TestConfSelect(RMFTestCase):
 
 
   @patch("resource_management.core.shell.call")
+  @patch.object(os.path, "isdir")
   @patch.object(os.path, "exists")
   @patch.object(os.path, "islink")
   @patch("resource_management.libraries.functions.conf_select._valid", new = MagicMock(return_value = True))
   @patch("resource_management.libraries.functions.conf_select.create", new = MagicMock(return_value = ["/etc/hadoop/2.3.0.0-1234/0"]))
   @patch("resource_management.libraries.functions.conf_select.select", new = MagicMock())
-  def test_symlink_conversion_to_current(self, islink_mock, path_mock, shell_call_mock):
+  def test_symlink_conversion_to_current(self, islink_mock, path_mock, isdir_mock, shell_call_mock):
     """
     Tests that conf-select creates the correct symlink directories.
     :return:
@@ -134,6 +135,13 @@ class TestConfSelect(RMFTestCase):
 
       return False
 
+    def isdir_mock_call(path):
+      if path == "/etc/hadoop/conf":
+        return True
+
+      return False
+
+
     packages = conf_select.get_package_dirs()
 
     path_mock.side_effect = path_mock_call
@@ -175,7 +183,6 @@ class TestConfSelect(RMFTestCase):
     """
     packages = conf_select.get_package_dirs()
 
-    conf_select.convert_conf_directories_to_symlinks("hadoop", "2.3.0.0-1234",
-      packages["hadoop"], link_to = conf_select.DIRECTORY_TYPE_BACKUP)
+    conf_select.convert_conf_directories_to_symlinks("hadoop", "2.3.0.0-1234", packages["hadoop"])
 
     self.assertEqual(pprint.pformat(self.env.resource_list), "[]")
\ No newline at end of file
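
The hunk above defines isdir_mock_call but cuts off before showing it wired up; presumably the test assigns isdir_mock.side_effect = isdir_mock_call alongside the existing path_mock assignment. For reference, a side_effect function is invoked with the mock's arguments and its return value becomes the call's result:

from mock.mock import MagicMock

isdir_mock = MagicMock()
isdir_mock.side_effect = lambda path: path == "/etc/hadoop/conf"
assert isdir_mock("/etc/hadoop/conf") is True
assert isdir_mock("/etc/hive/conf") is False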

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_client.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_client.py
index 3d9bfd7..55cf61a 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_client.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_client.py
@@ -59,8 +59,6 @@ class HiveClientDefault(HiveClient):
     import params
     env.set_params(params)
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hive", params.version)
-      conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_metastore.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_metastore.py
index a49bbd9..a69460e 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_metastore.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_metastore.py
@@ -105,7 +105,6 @@ class HiveMetastoreDefault(HiveMetastore):
     is_upgrade = params.upgrade_direction == Direction.UPGRADE
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hive", params.version)
       stack_select.select("hive-metastore", params.version)
 
     if is_upgrade and params.stack_version_formatted_major and \

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server.py
index 31b083b..6aeaf80 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server.py
@@ -119,7 +119,7 @@ class HiveServerDefault(HiveServer):
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hive", params.version)
+
       stack_select.select("hive-server2", params.version)
 
       # Copy mapreduce.tar.gz and tez.tar.gz to HDFS

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server_interactive.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server_interactive.py
index 2df001c..beb1220 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server_interactive.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/hive_server_interactive.py
@@ -87,7 +87,6 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
 
       if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
         stack_select.select("hive-server2-hive2", params.version)
-        conf_select.select(params.stack_name, "hive2", params.version)
 
         # Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
         resource_created = copy_to_hdfs(

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/webhcat_server.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/webhcat_server.py
index 34687c4..ee9087d 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/webhcat_server.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/HIVE/package/scripts/webhcat_server.py
@@ -79,8 +79,6 @@ class WebHCatServerDefault(WebHCatServer):
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): 
       # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
-      conf_select.select(params.stack_name, "hive-hcatalog", params.version)
-      conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hive-webhcat", params.version)
 
   def security_status(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/application_timeline_server.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/application_timeline_server.py
index 4ec6aa7..e3a81cf 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/application_timeline_server.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/application_timeline_server.py
@@ -72,7 +72,6 @@ class ApplicationTimelineServerDefault(ApplicationTimelineServer):
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-yarn-timelineserver", params.version)
 
   def status(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/historyserver.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/historyserver.py
index 34c683a..f933e91 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/historyserver.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/historyserver.py
@@ -79,7 +79,6 @@ class HistoryServerDefault(HistoryServer):
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-mapreduce-historyserver", params.version)
       # MC Hammer said, "Can't touch this"
       copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/mapreduce2_client.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/mapreduce2_client.py
index 424157b..8de9d56 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/mapreduce2_client.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/mapreduce2_client.py
@@ -71,7 +71,6 @@ class MapReduce2Client(Script):
       # Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir,
       # must change it now so this function can find the Jinja Templates for the service.
       env.config.basedir = base_dir
-      conf_select.select(params.stack_name, conf_select_name, params.version)
       self.configure(env, config_dir=config_dir)
 
 
@@ -90,7 +89,6 @@ class MapReduce2ClientDefault(MapReduce2Client):
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/nodemanager.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/nodemanager.py
index b235cad..4f3eecb 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/nodemanager.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/nodemanager.py
@@ -74,7 +74,6 @@ class NodemanagerDefault(Nodemanager):
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-yarn-nodemanager", params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
index 71c7bc1..12c279a 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/resourcemanager.py
@@ -114,7 +114,6 @@ class ResourcemanagerDefault(Resourcemanager):
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-yarn-resourcemanager", params.version)
 
   def start(self, env, upgrade_type=None):

http://git-wip-us.apache.org/repos/asf/ambari/blob/158bd656/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn_client.py b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn_client.py
index 4d65a40..5cd2e69 100755
--- a/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn_client.py
+++ b/contrib/management-packs/odpi-ambari-mpack/src/main/resources/stacks/ODPi/2.0/services/YARN/package/scripts/yarn_client.py
@@ -59,7 +59,6 @@ class YarnClientDefault(YarnClient):
     env.set_params(params)
 
     if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
-      conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 
 

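All of these ODPi mpack hunks make the same change: pre_upgrade_restart drops its explicit conf_select.select calls and keeps only stack_select.select, since configuration directories are now relinked centrally by the conf_select module. The resulting method body is uniformly of this shape (a consolidated sketch; "hadoop-client" stands in for whichever component each script manages):

def pre_upgrade_restart(self, env, upgrade_type=None):
    import params
    env.set_params(params)

    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
        # conf_select.select(...) calls are gone; conf dirs are relinked centrally
        stack_select.select("hadoop-client", params.version)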

[10/50] [abbrv] ambari git commit: AMBARI-20634. Cleanup unused files and data in ambari-agent code (aonishuk)

Posted by jl...@apache.org.
AMBARI-20634. Cleanup unused files and data in ambari-agent code (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c28b797d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c28b797d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c28b797d

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: c28b797d2723ee11725db2b68c7ae0cabda5bc25
Parents: 5b36cdf
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Tue Oct 3 16:14:20 2017 +0300
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Tue Oct 3 16:14:20 2017 +0300

----------------------------------------------------------------------
 .../main/python/ambari_agent/AmbariConfig.py    |  74 +-------
 .../main/python/ambari_agent/ProcessHelper.py   |  71 --------
 .../src/main/python/ambari_agent/StatusCheck.py | 142 ---------------
 .../src/main/python/ambari_agent/main.py        |  12 +-
 .../src/test/python/ambari_agent/TestMain.py    |   8 +-
 .../python/ambari_agent/TestProcessHelper.py    |  70 --------
 .../test/python/ambari_agent/TestStatusCheck.py | 180 -------------------
 7 files changed, 14 insertions(+), 543 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c28b797d/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
index 95e4712..fcbc21c 100644
--- a/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
+++ b/ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
@@ -30,6 +30,9 @@ from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 
 logger = logging.getLogger(__name__)
 
+"""
+The configuration below is needed only for unit tests.
+"""
 content = """
 
 [server]
@@ -75,77 +78,6 @@ log_command_executes = 0
 
 """.format(ps=os.sep)
 
-servicesToPidNames = {
-  'GLUSTERFS': 'glusterd.pid$',
-  'NAMENODE': 'hadoop-{USER}-namenode.pid$',
-  'SECONDARY_NAMENODE': 'hadoop-{USER}-secondarynamenode.pid$',
-  'DATANODE': 'hadoop-{USER}-datanode.pid$',
-  'JOBTRACKER': 'hadoop-{USER}-jobtracker.pid$',
-  'TASKTRACKER': 'hadoop-{USER}-tasktracker.pid$',
-  'RESOURCEMANAGER': 'yarn-{USER}-resourcemanager.pid$',
-  'NODEMANAGER': 'yarn-{USER}-nodemanager.pid$',
-  'HISTORYSERVER': 'mapred-{USER}-historyserver.pid$',
-  'JOURNALNODE': 'hadoop-{USER}-journalnode.pid$',
-  'ZKFC': 'hadoop-{USER}-zkfc.pid$',
-  'OOZIE_SERVER': 'oozie.pid',
-  'ZOOKEEPER_SERVER': 'zookeeper_server.pid',
-  'FLUME_SERVER': 'flume-node.pid',
-  'TEMPLETON_SERVER': 'templeton.pid',
-  'HBASE_MASTER': 'hbase-{USER}-master.pid',
-  'HBASE_REGIONSERVER': 'hbase-{USER}-regionserver.pid',
-  'HCATALOG_SERVER': 'webhcat.pid',
-  'KERBEROS_SERVER': 'kadmind.pid',
-  'HIVE_SERVER': 'hive-server.pid',
-  'HIVE_METASTORE': 'hive.pid',
-  'HIVE_SERVER_INTERACTIVE': 'hive-interactive.pid',
-  'MYSQL_SERVER': 'mysqld.pid',
-  'HUE_SERVER': '/var/run/hue/supervisor.pid',
-  'WEBHCAT_SERVER': 'webhcat.pid',
-}
-
-# Each service, which's pid depends on user should provide user mapping
-servicesToLinuxUser = {
-  'NAMENODE': 'hdfs_user',
-  'SECONDARY_NAMENODE': 'hdfs_user',
-  'DATANODE': 'hdfs_user',
-  'JOURNALNODE': 'hdfs_user',
-  'ZKFC': 'hdfs_user',
-  'JOBTRACKER': 'mapred_user',
-  'TASKTRACKER': 'mapred_user',
-  'RESOURCEMANAGER': 'yarn_user',
-  'NODEMANAGER': 'yarn_user',
-  'HISTORYSERVER': 'mapred_user',
-  'HBASE_MASTER': 'hbase_user',
-  'HBASE_REGIONSERVER': 'hbase_user',
-}
-
-pidPathVars = [
-  {'var': 'glusterfs_pid_dir_prefix',
-   'defaultValue': '/var/run'},
-  {'var': 'hadoop_pid_dir_prefix',
-   'defaultValue': '/var/run/hadoop'},
-  {'var': 'hadoop_pid_dir_prefix',
-   'defaultValue': '/var/run/hadoop'},
-  {'var': 'hbase_pid_dir',
-   'defaultValue': '/var/run/hbase'},
-  {'var': 'zk_pid_dir',
-   'defaultValue': '/var/run/zookeeper'},
-  {'var': 'oozie_pid_dir',
-   'defaultValue': '/var/run/oozie'},
-  {'var': 'hcat_pid_dir',
-   'defaultValue': '/var/run/webhcat'},
-  {'var': 'hive_pid_dir',
-   'defaultValue': '/var/run/hive'},
-  {'var': 'mysqld_pid_dir',
-   'defaultValue': '/var/run/mysqld'},
-  {'var': 'hcat_pid_dir',
-   'defaultValue': '/var/run/webhcat'},
-  {'var': 'yarn_pid_dir_prefix',
-   'defaultValue': '/var/run/hadoop-yarn'},
-  {'var': 'mapred_pid_dir_prefix',
-   'defaultValue': '/var/run/hadoop-mapreduce'},
-]
-
 
 class AmbariConfig:
   TWO_WAY_SSL_PROPERTY = "security.server.two_way_ssl"
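
Since the module-level content string is kept purely for unit tests, a test would typically parse it in memory rather than write it to disk; a minimal sketch (Python 2, matching the module):

import StringIO
import ConfigParser

from ambari_agent import AmbariConfig

parser = ConfigParser.RawConfigParser()
parser.readfp(StringIO.StringIO(AmbariConfig.content))
print(parser.sections())  # e.g. ['server', ...]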

http://git-wip-us.apache.org/repos/asf/ambari/blob/c28b797d/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py b/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py
deleted file mode 100644
index bc2f827..0000000
--- a/ambari-agent/src/main/python/ambari_agent/ProcessHelper.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import logging
-import traceback
-import sys
-from ambari_commons.shell import getTempFiles
-
-logger = logging.getLogger()
-
-if 'AMBARI_PID_DIR' in os.environ:
-    piddir = os.environ['AMBARI_PID_DIR']
-else:
-    piddir = "/var/run/ambari-agent"
-
-pidfile = os.path.join(piddir, "ambari-agent.pid")
-
-
-def _clean():
-  logger.info("Removing pid file")
-  try:
-    os.unlink(pidfile)
-  except Exception as ex:
-    traceback.print_exc()
-    logger.warn("Unable to remove pid file: %s", ex)
-
-  logger.info("Removing temp files")
-  for f in getTempFiles():
-    if os.path.exists(f):
-      try:
-        os.unlink(f)
-      except Exception as ex:
-        traceback.print_exc()
-        logger.warn("Unable to remove: %s, %s", f, ex)
-
-
-def stopAgent():
-  _clean()
-  sys.exit(0)
-
-
-def restartAgent():
-  _clean()
-
-  executable = sys.executable
-  args = sys.argv[:]
-  args.insert(0, executable)
-
-  logger.info("Restarting self: %s %s", executable, args)
-
-  os.execvp(executable, args)
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/c28b797d/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/StatusCheck.py b/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
deleted file mode 100644
index 5231f72..0000000
--- a/ambari-agent/src/main/python/ambari_agent/StatusCheck.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import logging
-import os
-import re
-import string
-from ambari_commons.shell import shellRunner
-
-
-logger = logging.getLogger()
-
-
-class StatusCheck:
-    
-  USER_PATTERN='{USER}'
-  firstInit = True
-
-  def listFiles(self, dir):
-    basedir = dir
-    logger.debug("Files in " + os.path.abspath(dir) + ": ")
-    subdirlist = []
-    try:
-      if os.path.isdir(dir):
-        for item in os.listdir(dir):
-            if os.path.isfile(item) and item.endswith('.pid'):
-              self.pidFilesDict[item.split(os.sep).pop()] = os.getcwd() + os.sep + item
-            else:
-              subdirlist.append(os.path.join(basedir, item))
-        for subdir in subdirlist:
-            self.listFiles(subdir)
-      else:
-        if dir.endswith('.pid'):
-          self.pidFilesDict[dir.split(os.sep).pop()] = dir
-    except OSError as e:
-      logger.info(e.strerror + ' to ' + e.filename)
-      
-  def fillDirValues(self):
-    try:
-      for pidVar in self.pidPathVars:
-        pidVarName = pidVar['var']
-        pidDefaultvalue = pidVar['defaultValue']
-        if self.globalConfig.has_key(pidVarName):
-          self.pidPathes.append(self.globalConfig[pidVarName])
-        else:
-          self.pidPathes.append(pidDefaultvalue)
-    except Exception as e:
-        logger.error("Error while filling directories values " + str(e))
-        
-  def __init__(self, serviceToPidDict, pidPathVars, globalConfig,
-    servicesToLinuxUser):
-    
-    self.serToPidDict = serviceToPidDict.copy()
-    self.pidPathVars = pidPathVars
-    self.pidPathes = []
-    self.sh = shellRunner()
-    self.pidFilesDict = {}
-    self.globalConfig = globalConfig
-    self.servicesToLinuxUser = servicesToLinuxUser
-    
-    self.fillDirValues()
-    
-    for pidPath in self.pidPathes:
-      self.listFiles(pidPath)
-
-    for service, pid in self.serToPidDict.items():
-      if self.servicesToLinuxUser.has_key(service):
-        linuxUserKey = self.servicesToLinuxUser[service]
-        if self.globalConfig.has_key(linuxUserKey):
-          self.serToPidDict[service] = string.replace(pid, self.USER_PATTERN,
-            self.globalConfig[linuxUserKey])
-      else:
-        if self.USER_PATTERN in pid:
-          logger.error('There is no linux user mapping for component: ' + service)
-
-    if StatusCheck.firstInit:
-      logger.info('Service to pid dictionary: ' + str(self.serToPidDict))
-      StatusCheck.firstInit = False
-    else:
-      logger.debug('Service to pid dictionary: ' + str(self.serToPidDict))
-
-  def getIsLive(self, pidPath):
-
-    if not pidPath:
-      return False
-
-    isLive = False
-    pid = -1
-    try:
-      pidFile = open(pidPath, 'r')
-      pid = int(pidFile.readline())
-    except IOError, e:
-      logger.warn("Can not open file " + str(pidPath) + " due to " + str(e))
-      return isLive
-    res = self.sh.run(['ps -p', str(pid), '-f'])
-    lines = res['output'].strip().split(os.linesep)
-    try:
-      procInfo = lines[1]
-      isLive = not procInfo == None
-    except IndexError:
-      logger.info("Process is dead. Checking " + str(pidPath))
-    return isLive
-
-  def getStatus(self, serviceCode):
-    try:
-      pidPath = None
-      pidPattern = self.serToPidDict[serviceCode]
-      logger.debug('pidPattern: ' + pidPattern)
-    except KeyError as e:
-      logger.warn('There is no mapping for ' + serviceCode)
-      return None
-    try:
-      for pidFile in self.pidFilesDict.keys():
-        if re.match(pidPattern, pidFile):
-          pidPath = self.pidFilesDict[pidFile]          
-      logger.debug('pidPath: ' + str(pidPath))
-      result = self.getIsLive(pidPath)
-      return result
-    except KeyError:
-      logger.info('Pid file was not found')
-      return False
-
-  def getSerToPidDict(self):
-    return self.serToPidDict
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/c28b797d/ambari-agent/src/main/python/ambari_agent/main.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/main.py b/ambari-agent/src/main/python/ambari_agent/main.py
index 19c92b0..5fcd051 100644
--- a/ambari-agent/src/main/python/ambari_agent/main.py
+++ b/ambari-agent/src/main/python/ambari_agent/main.py
@@ -89,7 +89,6 @@ import time
 import locale
 import platform
 import ConfigParser
-import ProcessHelper
 import resource
 from logging.handlers import SysLogHandler
 from Controller import Controller
@@ -118,6 +117,9 @@ agentPid = os.getpid()
 # Global variables to be set later.
 home_dir = ""
 
+agent_piddir = os.environ['AMBARI_PID_DIR'] if 'AMBARI_PID_DIR' in os.environ else "/var/run/ambari-agent"
+agent_pidfile = os.path.join(agent_piddir, "ambari-agent.pid")
+
 config = AmbariConfig.AmbariConfig()
 
 # TODO AMBARI-18733, remove this global variable and calculate it based on home_dir once it is set.
@@ -260,8 +262,8 @@ def perform_prestart_checks(expected_hostname):
       logger.error(msg)
       sys.exit(1)
   # Check if there is another instance running
-  if os.path.isfile(ProcessHelper.pidfile) and not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
-    print("%s already exists, exiting" % ProcessHelper.pidfile)
+  if os.path.isfile(agent_pidfile) and not OSCheck.get_os_family() == OSConst.WINSRV_FAMILY:
+    print("%s already exists, exiting" % agent_pidfile)
     sys.exit(1)
   # check if ambari prefix exists
   elif config.has_option('agent', 'prefix') and not os.path.isdir(os.path.abspath(config.get('agent', 'prefix'))):
@@ -281,14 +283,14 @@ def perform_prestart_checks(expected_hostname):
 
 def daemonize():
   pid = str(os.getpid())
-  file(ProcessHelper.pidfile, 'w').write(pid)
+  file(agent_pidfile, 'w').write(pid)
 
 def stop_agent():
 # stop existing Ambari agent
   pid = -1
   runner = shellRunner()
   try:
-    with open(ProcessHelper.pidfile, 'r') as f:
+    with open(agent_pidfile, 'r') as f:
       pid = f.read()
     pid = int(pid)
     

http://git-wip-us.apache.org/repos/asf/ambari/blob/c28b797d/ambari-agent/src/test/python/ambari_agent/TestMain.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestMain.py b/ambari-agent/src/test/python/ambari_agent/TestMain.py
index a04b85d..504ca08 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestMain.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestMain.py
@@ -224,14 +224,14 @@ class TestMain(unittest.TestCase):
   def test_daemonize_and_stop(self, exists_mock, sleep_mock):
     from ambari_commons.shell import shellRunnerLinux
 
-    oldpid = ProcessHelper.pidfile
+    oldpid = main.agent_pidfile
     pid = str(os.getpid())
     _, tmpoutfile = tempfile.mkstemp()
-    ProcessHelper.pidfile = tmpoutfile
+    main.agent_pidfile = tmpoutfile
 
     # Test daemonization
     main.daemonize()
-    saved = open(ProcessHelper.pidfile, 'r').read()
+    saved = open(main.agent_pidfile, 'r').read()
     self.assertEqual(pid, saved)
 
     main.GRACEFUL_STOP_TRIES = 1
@@ -269,7 +269,7 @@ class TestMain(unittest.TestCase):
                                   call(['ambari-sudo.sh', 'kill', '-9', pid])])
 
     # Restore
-    ProcessHelper.pidfile = oldpid
+    main.agent_pidfile = oldpid
     os.remove(tmpoutfile)
 
   @patch("os.rmdir")

http://git-wip-us.apache.org/repos/asf/ambari/blob/c28b797d/ambari-agent/src/test/python/ambari_agent/TestProcessHelper.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestProcessHelper.py b/ambari-agent/src/test/python/ambari_agent/TestProcessHelper.py
deleted file mode 100644
index f30d45d..0000000
--- a/ambari-agent/src/test/python/ambari_agent/TestProcessHelper.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import os
-import tempfile
-import unittest
-from mock.mock import patch, MagicMock
-from ambari_agent import ProcessHelper
-
-from only_for_platform import not_for_platform, PLATFORM_WINDOWS
-
-@not_for_platform(PLATFORM_WINDOWS)
-class TestProcessHelper(unittest.TestCase):
-
-  @patch.object(ProcessHelper, "getTempFiles")
-  def test_clean(self, getTempFilesMock):
-
-    tf1 = tempfile.NamedTemporaryFile(delete=False)
-    tf2 = tempfile.NamedTemporaryFile(delete=False)
-    tf3 = tempfile.NamedTemporaryFile(delete=False)
-
-    getTempFilesMock.return_value = [tf2.name, tf3.name]
-    ProcessHelper.pidfile = tf1.name
-    ProcessHelper.logger = MagicMock()
-
-    ProcessHelper._clean()
-
-    self.assertFalse(os.path.exists(tf1.name))
-    self.assertFalse(os.path.exists(tf2.name))
-    self.assertFalse(os.path.exists(tf3.name))
-
-
-  @patch("sys.exit")
-  @patch.object(ProcessHelper, "_clean")
-  def test_stopAgent(self, _clean_mock, sys_exit_mock):
-
-    ProcessHelper.stopAgent()
-    self.assertTrue(_clean_mock.called)
-    self.assertTrue(sys_exit_mock.called)
-
-
-  @patch("os.execvp")
-  @patch.object(ProcessHelper, "_clean")
-  def test_restartAgent(self, _clean_mock, execMock):
-
-    ProcessHelper.logger = MagicMock()
-    ProcessHelper.restartAgent()
-
-    self.assertTrue(_clean_mock.called)
-    self.assertTrue(execMock.called)
-    self.assertEqual(2, len(execMock.call_args_list[0]))
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/c28b797d/ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py b/ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py
deleted file mode 100644
index 34c57cc..0000000
--- a/ambari-agent/src/test/python/ambari_agent/TestStatusCheck.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-import string
-import random
-import os
-from unittest import TestCase
-from ambari_commons import OSCheck
-from ambari_agent.StatusCheck import StatusCheck
-import logging
-
-from mock.mock import patch
-from mock.mock import MagicMock
-
-from only_for_platform import os_distro_value
-
-USERNAME_LENGTH=10
-USERNAME_CHARS=string.ascii_uppercase +string.ascii_lowercase + string.digits + '-_'
-
-PID_DIR='/pids_dir'
-
-COMPONENT_LIVE = 'LIVE_COMPONENT'
-COMPONENT_LIVE_PID = 'live_' + StatusCheck.USER_PATTERN + '_comp.pid'
-
-COMPONENT_DEAD = 'DEAD_COMPONENT'
-COMPONENT_DEAD_PID = 'dead_' + StatusCheck.USER_PATTERN + '_comp.pid'
-
-class TestStatusCheck(TestCase):
-
-  logger = logging.getLogger()
-
-  def generateUserName(self):
-    return ''.join(random.choice(USERNAME_CHARS) for x in range(USERNAME_LENGTH))
-
-  def setUp(self):
-
-    self.pidPathVars = [
-      {'var' : '',
-      'defaultValue' : PID_DIR}
-    ]
-
-    self.serviceToPidDict = {
-      COMPONENT_LIVE : COMPONENT_LIVE_PID,
-      COMPONENT_DEAD : COMPONENT_DEAD_PID
-    }
-
-    live_user = self.generateUserName()
-    self.logger.info('Live user: ' + live_user)
-    self.live_pid_file_name = string.replace(COMPONENT_LIVE_PID, StatusCheck.USER_PATTERN, live_user)
-    self.live_pid_full_path = PID_DIR + os.sep + self.live_pid_file_name
-
-    dead_user = self.generateUserName()
-    self.logger.info('Dead user: ' + live_user)
-    self.dead_pid_file_name = string.replace(COMPONENT_DEAD_PID, StatusCheck.USER_PATTERN, dead_user)
-    self.dead_pid_full_path = PID_DIR + os.sep + self.dead_pid_file_name
-
-    self.pidFilesDict = {self.live_pid_file_name : self.live_pid_full_path,
-                         self.dead_pid_file_name : self.dead_pid_full_path}
-
-    self.is_live_values = {self.live_pid_full_path : True,
-                      self.dead_pid_full_path : False}
-    
-    self.servicesToLinuxUser = {COMPONENT_LIVE : 'live_user',
-                                COMPONENT_DEAD : 'dead_user'}
-
-    self.globalConfig = {'live_user' : live_user,
-                         'dead_user' : dead_user}
-
-    
-  # Ensure that status checker return True for running process
-  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
-  @patch.object(StatusCheck, 'getIsLive')
-  def test_live(self, get_is_live_mock):
-
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
-      self.globalConfig, self.servicesToLinuxUser)
-
-    self.assertTrue(StatusCheck.USER_PATTERN in self.serviceToPidDict[COMPONENT_LIVE])
-    self.assertTrue(StatusCheck.USER_PATTERN in self.serviceToPidDict[COMPONENT_DEAD])
-
-    statusCheck.pidFilesDict = self.pidFilesDict
-    
-    get_is_live_mock.side_effect = lambda pid_path : self.is_live_values[pid_path]
-    
-    status = statusCheck.getStatus(COMPONENT_LIVE)
-    self.assertEqual(status, True)
-
-  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
-  @patch.object(logger, 'info')
-  def test_dont_relog_serToPidDict(self, logger_info_mock):
-    TestStatusCheck.timesLogged = 0
-
-    def my_side_effect(*args, **kwargs):
-      TestStatusCheck.timesLogged += args[0].find('Service to pid dictionary: ')+1
-      
-
-    logger_info_mock.side_effect = my_side_effect
-    
-    # call this three times
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
-      self.globalConfig, self.servicesToLinuxUser)
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
-      self.globalConfig, self.servicesToLinuxUser)
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
-      self.globalConfig, self.servicesToLinuxUser)
-    # logged not more then once
-    self.assert_(TestStatusCheck.timesLogged <= 1, "test_dont_relog_serToPidDict logged more then once")
-
-  # Ensure that status checker return True for running process even if multiple
-  # pids for a service component exist
-  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
-  @patch.object(StatusCheck, 'getIsLive')
-  def test_live_if_multiple_pids(self, get_is_live_mock):
-
-    one_more_pid_file_name = string.replace(COMPONENT_LIVE_PID, StatusCheck.USER_PATTERN,
-      'any_other_linux_user')
-    one_more_pid_full_path = PID_DIR + os.sep + one_more_pid_file_name
-
-    self.pidFilesDict[one_more_pid_file_name] = one_more_pid_full_path
-    self.is_live_values[one_more_pid_full_path] = False
-
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
-      self.globalConfig, self.servicesToLinuxUser)
-
-    statusCheck.pidFilesDict = self.pidFilesDict
-
-    get_is_live_mock.side_effect = lambda pid_path : self.is_live_values[pid_path]
-
-    status = statusCheck.getStatus(COMPONENT_LIVE)
-    self.assertEqual(status, True)
-    
-  # Ensure that status checker prints error message if there is no linux user
-  # for service, which pid depends on user
-  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
-  @patch.object(StatusCheck, 'getIsLive')
-  @patch.object(logger, "error")
-  def test_no_user_mapping(self, error_mock, get_is_live_mock):
-
-    
-    badServiceToPidDict = self.serviceToPidDict.copy()
-    badServiceToPidDict['BAD_COMPONENT'] = 'prefix' + StatusCheck.USER_PATTERN
-
-    statusCheck = StatusCheck(badServiceToPidDict, self.pidPathVars,
-      self.globalConfig, self.servicesToLinuxUser)
-
-    statusCheck.pidFilesDict = self.pidFilesDict
-
-    get_is_live_mock.side_effect = lambda pid_path : self.is_live_values[pid_path]
-
-    status = statusCheck.getStatus(COMPONENT_LIVE)
-    self.assertTrue(error_mock.called)
-
-  # Ensure that status checker return False for dead process
-  @patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
-  @patch.object(StatusCheck, 'getIsLive')
-  def test_dead(self, get_is_live_mock):
-    statusCheck = StatusCheck(self.serviceToPidDict, self.pidPathVars,
-      self.globalConfig, self.servicesToLinuxUser)
-
-    statusCheck.pidFilesDict = self.pidFilesDict
-    
-    get_is_live_mock.side_effect = lambda pid_path : self.is_live_values[pid_path]
-    status = statusCheck.getStatus(COMPONENT_DEAD)
-    self.assertEqual(status, False)


[08/50] [abbrv] ambari git commit: AMBARI-22095 Make hooks stack agnostic (dsen)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh
new file mode 100644
index 0000000..68aa96d
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export bin_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
+  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
+    (( EXIT_CODE = $EXIT_CODE | $? ))
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

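Note on the script above: a hypothetical invocation, matching the
positional arguments it shifts off (HDFS user, Hadoop conf dir, Hadoop
bin dir, marker dir, then one or more comma-separated NameNode name
directories; all paths below are illustrative):

  checkForFormat.sh hdfs /etc/hadoop/conf \
      /usr/hdp/current/hadoop-client/bin \
      /var/lib/hdfs/namenode/formatted \
      /hadoop/hdfs/namenode1,/hadoop/hdfs/namenode2

If the marker directory already exists, formatting is skipped. Otherwise,
if every name directory is empty, the script formats the NameNode as the
HDFS user; if not, it lists the non-empty directories and exits non-zero.
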
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar
new file mode 100644
index 0000000..6c993bf
Binary files /dev/null and b/ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..7e12962
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,134 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use the following logger to send job summaries to a separate file,
+# defined by hadoop.mapreduce.jobsummary.log.file and rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+ 
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

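Note on task-log4j.properties above: the defaults in its header are
resolved from JVM system properties, so they can be overridden per
process without editing the file, e.g. by passing flags such as the
following (log directory illustrative):

  -Dhadoop.root.logger=DEBUG,console -Dhadoop.log.dir=/var/log/hadoop
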
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py
new file mode 100644
index 0000000..0f7a55c
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import sys, os
+from string import join
+import ConfigParser
+
+
+DEFAULT_RACK = "/default-rack"
+DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
+SECTION_NAME = "network_topology"
+
+class TopologyScript():
+
+  def load_rack_map(self):
+    try:
+      #RACK_MAP contains both host name vs rack and ip vs rack mappings
+      mappings = ConfigParser.ConfigParser()
+      mappings.read(DATA_FILE_NAME)
+      return dict(mappings.items(SECTION_NAME))
+    except ConfigParser.NoSectionError:
+      return {}
+
+  def get_racks(self, rack_map, args):
+    if len(args) == 1:
+      return DEFAULT_RACK
+    else:
+      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
+
+  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
+    #try looking up by hostname
+    rack = rack_map.get(hostname_or_ip)
+    if rack is not None:
+      return rack
+    #try looking up by ip
+    rack = rack_map.get(self.extract_ip(hostname_or_ip))
+    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
+    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
+
+  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
+  def extract_ip(self, container_string):
+    return container_string.split("/")[0].split(":")[0]
+
+  def execute(self, args):
+    rack_map = self.load_rack_map()
+    rack = self.get_racks(rack_map, args)
+    print rack
+
+if __name__ == "__main__":
+  TopologyScript().execute(sys.argv)

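Note on topology_script.py above: the topology_mappings.data file it
reads is INI-style, with a single [network_topology] section mapping
both hostnames and IPs to racks (the entries below are hypothetical):

  [network_topology]
  host1.example.com=/rack-01
  10.0.0.11=/rack-01

Hadoop invokes the script with one or more hostnames or IPs, e.g.
"python topology_script.py host1.example.com 10.0.0.11", and it prints
the resolved racks space-separated on one line, falling back to
/default-rack (or the localhost.localdomain mapping) for unknown hosts.
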
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
new file mode 100644
index 0000000..04299ba
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
@@ -0,0 +1,173 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.core.resources import Directory
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import default
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import format
+
+
+DEFAULT_HADOOP_HDFS_EXTENSION_DIR = "/hdp/ext/{0}/hadoop"
+DEFAULT_HADOOP_HIVE_EXTENSION_DIR = "/hdp/ext/{0}/hive"
+DEFAULT_HADOOP_HBASE_EXTENSION_DIR = "/hdp/ext/{0}/hbase"
+
+def setup_extensions():
+  """
+  The goal of this method is to distribute extensions (for example, jar files) from
+  HDFS (/hdp/ext/{major_stack_version}/{service_name}) to all nodes that contain related
+  components of the service (YARN, HIVE, or HBASE). Extensions must be added to HDFS
+  manually by the user.
+  """
+
+  import params
+
+  # Hadoop Custom extensions
+  hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
+  hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
+  hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", params.hdfs_user)
+  hadoop_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/core-site/hadoop.custom-extensions.root",
+                                                 DEFAULT_HADOOP_HDFS_EXTENSION_DIR.format(params.major_stack_version)))
+  hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
+  hadoop_custom_extensions_services.append("YARN")
+
+  hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root())
+
+  if params.current_service in hadoop_custom_extensions_services:
+    clean_extensions(hadoop_custom_extensions_local_dir)
+    if hadoop_custom_extensions_enabled:
+      download_extensions(hadoop_custom_extensions_owner, params.user_group,
+                          hadoop_custom_extensions_hdfs_dir,
+                          hadoop_custom_extensions_local_dir)
+
+  setup_extensions_hive()
+
+  hbase_custom_extensions_services = ["HBASE"]
+  if params.current_service in hbase_custom_extensions_services:
+    setup_hbase_extensions()
+
+
+def setup_hbase_extensions():
+  import params
+
+  # HBase Custom extensions
+  hbase_custom_extensions_enabled = default("/configurations/hbase-site/hbase.custom-extensions.enabled", False)
+  hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user)
+  hbase_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/hbase-site/hbase.custom-extensions.root",
+                                                DEFAULT_HADOOP_HBASE_EXTENSION_DIR.format(params.major_stack_version)))
+  hbase_custom_extensions_local_dir = "{0}/current/ext/hbase".format(Script.get_stack_root())
+
+  impacted_components = ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER']
+  role = params.config.get('role','')
+
+  if role in impacted_components:
+    clean_extensions(hbase_custom_extensions_local_dir)
+    if hbase_custom_extensions_enabled:
+      download_extensions(hbase_custom_extensions_owner, params.user_group,
+                          hbase_custom_extensions_hdfs_dir,
+                          hbase_custom_extensions_local_dir)
+
+
+def setup_extensions_hive():
+  import params
+
+  hive_custom_extensions_enabled = default("/configurations/hive-site/hive.custom-extensions.enabled", False)
+  hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user)
+  hive_custom_extensions_hdfs_dir = DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(params.major_stack_version)
+
+  hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root())
+
+  impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT']
+  role = params.config.get('role','')
+
+  # Run copying for HIVE_SERVER and HIVE_CLIENT
+  if params.current_service == 'HIVE' and role in impacted_components:
+    clean_extensions(hive_custom_extensions_local_dir)
+    if hive_custom_extensions_enabled:
+      download_extensions(hive_custom_extensions_owner, params.user_group,
+                          hive_custom_extensions_hdfs_dir,
+                          hive_custom_extensions_local_dir)
+
+def download_extensions(owner_user, owner_group, hdfs_source_dir, local_target_dir):
+  """
+  :param owner_user: the user that owns the HDFS directory
+  :param owner_group: the group that owns the HDFS directory
+  :param hdfs_source_dir: the HDFS directory the files are pulled from
+  :param local_target_dir: the local directory the files are downloaded to
+  :return: True if successful, otherwise False.
+  """
+  import params
+
+  if not os.path.isdir(local_target_dir):
+    extensions_tmp_dir=format("{tmp_dir}/custom_extensions")
+    Directory(local_target_dir,
+              owner="root",
+              mode=0755,
+              group="root",
+              create_parents=True)
+
+    params.HdfsResource(hdfs_source_dir,
+                        type="directory",
+                        action="create_on_execute",
+                        owner=owner_user,
+                        group=owner_group,
+                        mode=0755)
+
+    Directory(extensions_tmp_dir,
+              owner=params.hdfs_user,
+              mode=0755,
+              create_parents=True)
+
+    # copy from hdfs to /tmp
+    params.HdfsResource(extensions_tmp_dir,
+                        type="directory",
+                        action="download_on_execute",
+                        source=hdfs_source_dir,
+                        user=params.hdfs_user,
+                        mode=0644,
+                        replace_existing_files=True)
+
+    # The Execute resource does not quote arguments correctly, so build the command as a single string.
+    cmd = format("{sudo} mv {extensions_tmp_dir}/* {local_target_dir}")
+    only_if_cmd = "ls -d {extensions_tmp_dir}/*".format(extensions_tmp_dir=extensions_tmp_dir)
+    Execute(cmd, only_if=only_if_cmd)
+
+    only_if_local = 'ls -d "{local_target_dir}"'.format(local_target_dir=local_target_dir)
+    Execute(("chown", "-R", "root:root", local_target_dir),
+            sudo=True,
+            only_if=only_if_local)
+
+    params.HdfsResource(None,action="execute")
+  return True
+
+def clean_extensions(local_dir):
+  """
+  :param local_dir: The local directory where the extensions are stored.
+  :return: True if successful, otherwise False.
+  """
+  if os.path.isdir(local_dir):
+    Directory(local_dir,
+              action="delete")
+  return True
+
+def get_config_formatted_value(property_value):
+  return format(property_value.replace("{{", "{").replace("}}", "}"))

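Note on custom_extensions.py above: the switches it reads live in
core-site, with hbase-site and hive-site analogues for the per-service
variants. A hypothetical core-site fragment (shown as key=value pairs
for brevity) enabling extensions for HIVE in addition to the
always-included YARN:

  hadoop.custom-extensions.enabled=true
  hadoop.custom-extensions.services=HIVE
  hadoop.custom-extensions.owner=hdfs
  hadoop.custom-extensions.root=/hdp/ext/{{major_stack_version}}/hadoop

get_config_formatted_value() rewrites {{...}} escapes to {...} before
passing the value through format(), so a configured root can defer
parameter substitution to hook execution time.
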
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..4cb276a
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py
@@ -0,0 +1,43 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from rack_awareness import create_topology_script_and_mapping
+from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink, setup_unlimited_key_jce_policy
+from custom_extensions import setup_extensions
+
+class BeforeStartHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+
+    setup_hadoop()
+    setup_configs()
+    create_javahome_symlink()
+    create_topology_script_and_mapping()
+    setup_unlimited_key_jce_policy()
+    if params.stack_supports_hadoop_custom_extensions:
+      setup_extensions()
+
+if __name__ == "__main__":
+  BeforeStartHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..6c26e01
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
@@ -0,0 +1,380 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.version import format_stack_version, compare_versions, get_major_version
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions import StackFeature
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+artifact_dir = tmp_dir + "/AMBARI-artifacts"
+
+version_for_stack_feature_checks = get_stack_feature_version(config)
+stack_supports_hadoop_custom_extensions = check_stack_feature(StackFeature.HADOOP_CUSTOM_EXTENSIONS, version_for_stack_feature_checks)
+
+sudo = AMBARI_SUDO_BINARY
+
+# Global flag enabling or disabling the sysprep feature
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
+# This is required if tarballs are going to be copied to HDFS, so set to False
+sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
+
+# Whether to skip setting up the unlimited key JCE policy
+sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+major_stack_version = get_major_version(stack_version_formatted)
+
+dfs_type = default("/commandParams/dfs_type", "")
+hadoop_conf_dir = "/etc/hadoop/conf"
+component_list = default("/localComponents", [])
+
+hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
+
+hadoop_metrics2_properties_content = None
+if 'hadoop-metrics2.properties' in config['configurations']:
+  hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
+
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+hadoop_home = stack_select.get_hadoop_dir("home")
+create_lib_snappy_symlinks = False
+  
+current_service = config['serviceName']
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+ambari_server_resources_url = default("/hostLevelParams/jdk_location", None)
+if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
+  ambari_server_resources_url = ambari_server_resources_url[:-1]
+
+# Unlimited key JCE policy params
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+unlimited_key_jce_required = default("/hostLevelParams/unlimited_key_jce_required", False)
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = default("/hostLevelParams/java_home", None)
+java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java"
+
+#users and groups
+has_hadoop_env = 'hadoop-env' in config['configurations']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+cluster_name = config["clusterName"]
+set_instanceId = "false"
+if 'cluster-env' in config['configurations'] and \
+    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
+  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
+  set_instanceId = "true"
+else:
+  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
+
+has_namenode = not len(namenode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_oozie_server = not len(oozie_servers) == 0
+has_hcat_server_host = not len(hcat_server_hosts) == 0
+has_hive_server_host = not len(hive_server_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+metric_collector_port = None
+if has_metric_collector:
+  if 'cluster-env' in config['configurations'] and \
+      'metrics_collector_external_port' in config['configurations']['cluster-env']:
+    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
+  else:
+    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
+    if metric_collector_web_address.find(':') != -1:
+      metric_collector_port = metric_collector_web_address.split(':')[1]
+    else:
+      metric_collector_port = '6188'
+  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+    metric_collector_protocol = 'https'
+  else:
+    metric_collector_protocol = 'http'
+  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+  pass
+metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
+metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
+
+host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
+host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
+
+# Cluster Zookeeper quorum
+zookeeper_quorum = None
+if has_zk_host:
+  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
+    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
+  else:
+    zookeeper_clientPort = '2181'
+  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
+  # last port config
+  zookeeper_quorum += ':' + zookeeper_clientPort
+
+#hadoop params
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hbase_tmp_dir = "/tmp/hbase-hbase"
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar")
+mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar")
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
+else:
+  rca_enabled = False
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
+
+# Hdfs log4j settings
+hadoop_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_log_max_backup_size', 256)
+hadoop_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_log_number_of_backup_files', 10)
+hadoop_security_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_security_log_max_backup_size', 256)
+hadoop_security_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_security_log_number_of_backup_files', 20)
+
+# Yarn log4j settings
+yarn_rm_summary_log_max_backup_size = default('configurations/yarn-log4j/yarn_rm_summary_log_max_backup_size', 256)
+yarn_rm_summary_log_number_of_backup_files = default('configurations/yarn-log4j/yarn_rm_summary_log_number_of_backup_files', 20)
+
+#log4j.properties
+if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
+  log4j_props = config['configurations']['hdfs-log4j']['content']
+  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
+    log4j_props += config['configurations']['yarn-log4j']['content']
+else:
+  log4j_props = None
+
+refresh_topology = False
+command_params = config["commandParams"] if "commandParams" in config else None
+if command_params is not None:
+  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
+
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+ambari_jce_name = default("/commandParams/ambari_jce_name", None)
+  
+ambari_libs_dir = "/var/lib/ambari-agent/lib"
+is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+#host info
+all_hosts = default("/clusterHostInfo/all_hosts", [])
+all_racks = default("/clusterHostInfo/all_racks", [])
+all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+
+#topology files
+net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
+net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
+net_topology_mapping_data_file_name = 'topology_mappings.data'
+net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
+
+#Added logic to create /tmp and /user directory for HCFS stack.  
+has_core_site = 'core-site' in config['configurations']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 0770
+
+
+##### Namenode RPC ports - metrics config section start #####
+
+# Figure out the rpc ports for current namenode
+nn_rpc_client_port = None
+nn_rpc_dn_port = None
+nn_rpc_healthcheck_port = None
+
+namenode_id = None
+namenode_rpc = None
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+dfs_ha_namemodes_ids_list = []
+other_namenode_id = None
+
+if dfs_ha_namenode_ids:
+ dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+ dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+ if dfs_ha_namenode_ids_array_len > 1:
+   dfs_ha_enabled = True
+
+if dfs_ha_enabled:
+ for nn_id in dfs_ha_namemodes_ids_list:
+   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+   if hostname.lower() in nn_host.lower():
+     namenode_id = nn_id
+     namenode_rpc = nn_host
+   pass
+ pass
+else:
+  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', default_fs)
+
+# if HDFS is not installed in the cluster, then don't try to access namenode_rpc
+if "core-site" in config['configurations'] and namenode_rpc:
+  port_str = namenode_rpc.split(':')[-1].strip()
+  try:
+    nn_rpc_client_port = int(port_str)
+  except ValueError:
+    nn_rpc_client_port = None
+
+if dfs_ha_enabled:
+ dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+ dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+else:
+ dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
+ dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
+
+if dfs_service_rpc_address:
+ nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
+
+if dfs_lifeline_rpc_address:
+ nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
+
+is_nn_client_port_configured = nn_rpc_client_port is not None
+is_nn_dn_port_configured = nn_rpc_dn_port is not None
+is_nn_healthcheck_port_configured = nn_rpc_healthcheck_port is not None
+
+##### end #####
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)

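Two small patterns from params.py above, restated as a self-contained
sketch (the host names and the stand-in resource function below are
hypothetical):

  import functools

  # ZooKeeper quorum construction: join the hosts with ':port,' and
  # append the port to the last host, as params.py does.
  zookeeper_hosts = ["zk1.example.com", "zk2.example.com", "zk3.example.com"]
  client_port = "2181"
  quorum = (":" + client_port + ",").join(zookeeper_hosts) + ":" + client_port
  print(quorum)  # zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181

  # Pre-binding common keyword arguments with functools.partial, as done
  # for HdfsResource, so call sites only pass what varies.
  def hdfs_resource(path, user=None, security_enabled=False):
      print("managing %s as %s (secure=%s)" % (path, user, security_enabled))

  HdfsResource = functools.partial(hdfs_resource, user="hdfs", security_enabled=True)
  HdfsResource("/tmp")  # managing /tmp as hdfs (secure=True)
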
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py
new file mode 100644
index 0000000..48158bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import File
+from resource_management.core.source import StaticFile, Template
+from resource_management.libraries.functions import format
+
+
+def create_topology_mapping():
+  import params
+
+  File(params.net_topology_mapping_data_file_path,
+       content=Template("topology_mappings.data.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       mode=0644,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script():
+  import params
+
+  File(params.net_topology_script_file_path,
+       content=StaticFile('topology_script.py'),
+       mode=0755,
+       only_if=format("test -d {net_topology_script_dir}"))
+
+def create_topology_script_and_mapping():
+  import params
+  if params.has_hadoop_env:
+    create_topology_mapping()
+    create_topology_script()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..48dc4b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,256 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
+from resource_management.core.resources.jcepolicyinfo import JcePolicyInfo
+
+from resource_management import *
+
+def setup_hadoop():
+  """
+  Setup hadoop files and directories
+  """
+  import params
+
+  Execute(("setenforce","0"),
+          only_if="test -f /selinux/enforce",
+          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
+          sudo=True,
+  )
+
+  #directories
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    Directory(params.hdfs_log_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group=params.user_group,
+              mode=0775,
+              cd_access='a',
+    )
+    if params.has_namenode:
+      Directory(params.hadoop_pid_dir_prefix,
+              create_parents = True,
+              owner='root',
+              group='root',
+              cd_access='a',
+      )
+    Directory(params.hadoop_tmp_dir,
+              create_parents = True,
+              owner=params.hdfs_user,
+              cd_access='a',
+              )
+  #files
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+      
+    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
+    if params.sysprep_skip_copy_fast_jar_hdfs:
+      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
+    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
+      # for source-code of jar goto contrib/fast-hdfs-resource
+      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
+           mode=0644,
+           content=StaticFile("fast-hdfs-resource.jar")
+      )
+      
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
+           owner=tc_owner,
+           content=Template('commons-logging.properties.j2')
+      )
+
+      health_check_template_name = "health_check"
+      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
+           owner=tc_owner,
+           content=Template(health_check_template_name + ".j2")
+      )
+
+      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
+      if (params.log4j_props != None):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+             content=InlineTemplate(params.log4j_props)
+        )
+      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
+        File(log4j_filename,
+             mode=0644,
+             group=params.user_group,
+             owner=params.hdfs_user,
+        )
+
+      if params.hadoop_metrics2_properties_content:
+        File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+             owner=params.hdfs_user,
+             group=params.user_group,
+             content=InlineTemplate(params.hadoop_metrics2_properties_content)
+             )
+      else:
+        File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+             owner=params.hdfs_user,
+             group=params.user_group,
+             content=Template("hadoop-metrics2.properties.j2")
+             )
+
+    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
+       create_dirs()
+
+    create_microsoft_r_dir()
+
+
+def setup_configs():
+  """
+  Creates configs for services HDFS mapred
+  """
+  import params
+
+  if params.has_namenode or params.dfs_type == 'HCFS':
+    if os.path.exists(params.hadoop_conf_dir):
+      File(params.task_log4j_properties_location,
+           content=StaticFile("task-log4j.properties"),
+           mode=0755
+      )
+
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+           owner=params.hdfs_user,
+           group=params.user_group
+      )
+    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+      File(os.path.join(params.hadoop_conf_dir, 'masters'),
+                owner=params.hdfs_user,
+                group=params.user_group
+      )
+
+def create_javahome_symlink():
+  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
+    Directory("/usr/jdk64/",
+         create_parents = True,
+    )
+    Link("/usr/jdk/jdk1.6.0_31",
+         to="/usr/jdk64/jdk1.6.0_31",
+    )
+
+def create_dirs():
+   import params
+   params.HdfsResource(params.hdfs_tmp_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.hdfs_user,
+                       mode=0777
+   )
+   params.HdfsResource(params.smoke_hdfs_user_dir,
+                       type="directory",
+                       action="create_on_execute",
+                       owner=params.smoke_user,
+                       mode=params.smoke_hdfs_user_mode
+   )
+   params.HdfsResource(None,
+                      action="execute"
+   )
+
+def create_microsoft_r_dir():
+  import params
+  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
+    directory = '/user/RevoShare'
+    try:
+      params.HdfsResource(directory,
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.hdfs_user,
+                          mode=0777)
+      params.HdfsResource(None, action="execute")
+    except Exception as exception:
+      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
+
+def setup_unlimited_key_jce_policy():
+  """
+  Sets up the unlimited key JCE policy if needed (and sets up the Ambari JCE as well if Ambari and the stack use different JDKs).
+  """
+  import params
+  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
+  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
+    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
+
+def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
+  """
+  Sets up the unlimited key JCE policy if needed.
+
+  The following criteria must be met:
+
+    * The cluster has not been previously prepared (sys prepped) - cluster-env/sysprep_skip_setup_jce = False
+    * Ambari is managing the host's JVM - /hostLevelParams/jdk_name is set
+    * Either security is enabled OR a service requires it - /hostLevelParams/unlimited_key_jce_required = True
+    * The unlimited key JCE policy has not already been installed
+
+  If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs:
+
+    1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the
+        Ambari agent's temporary directory
+    2. The existing JCE policy JAR files are deleted
+    3. The downloaded ZIP file is unzipped into the proper JCE policy directory
+
+  :return: None
+  """
+  import params
+
+  if params.sysprep_skip_setup_jce:
+    Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
+
+  elif not custom_jdk_name:
+    Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
+
+  elif not params.unlimited_key_jce_required:
+    Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
+
+  else:
+    jcePolicyInfo = JcePolicyInfo(custom_java_home)
+
+    if jcePolicyInfo.is_unlimited_key_jce_policy():
+      Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
+
+    elif custom_jce_name is None:
+      raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
+
+    else:
+      Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
+
+      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
+      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
+      java_security_dir = format("{custom_java_home}/jre/lib/security")
+
+      Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
+      Directory(params.artifact_dir, create_parents=True)
+      File(jce_zip_target, content=DownloadSource(jce_zip_source))
+
+      Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir))
+      File(format("{java_security_dir}/US_export_policy.jar"), action="delete")
+      File(format("{java_security_dir}/local_policy.jar"), action="delete")
+
+      Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir))
+      extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir)
+      Execute(extract_cmd,
+              only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"),
+              path=['/bin/', '/usr/bin'],
+              sudo=True
+              )

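Note on shared_initialization.py above: the JAR deletions and the
guarded unzip at the end of __setup_unlimited_key_jce_policy() amount to
the following manual steps (the ZIP path below is hypothetical, and
$JAVA_HOME stands for custom_java_home):

  rm $JAVA_HOME/jre/lib/security/US_export_policy.jar
  rm $JAVA_HOME/jre/lib/security/local_policy.jar
  unzip -o -j -q /var/lib/ambari-agent/tmp/AMBARI-artifacts/jce_policy.zip \
      -d $JAVA_HOME/jre/lib/security

i.e. delete the limited-strength policy JARs, then unzip the unlimited
key policy JARs flat (-j, no directory structure) into the JRE security
directory.
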
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..2197ba5
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,43 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..1adba80
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..2cd9aa8
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,107 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
+supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
+
+{% if has_metric_collector %}
+
+*.period={{metrics_collection_period}}
+*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
+*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
+*.sink.timeline.period={{metrics_collection_period}}
+*.sink.timeline.sendInterval={{metrics_report_interval}}000
+*.sink.timeline.slave.host.name={{hostname}}
+*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
+*.sink.timeline.protocol={{metric_collector_protocol}}
+*.sink.timeline.port={{metric_collector_port}}
+*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
+*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
+
+# HTTPS properties
+*.sink.timeline.truststore.path = {{metric_truststore_path}}
+*.sink.timeline.truststore.type = {{metric_truststore_type}}
+*.sink.timeline.truststore.password = {{metric_truststore_password}}
+
+datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
+jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
+applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
+
+resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
+
+{% if is_nn_client_port_configured %}
+# Namenode rpc ports customization
+namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
+{% endif %}
+{% if is_nn_dn_port_configured %}
+namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
+{% endif %}
+{% if is_nn_healthcheck_port_configured %}
+namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
+{% endif %}
+
+{% endif %}
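
One line in this template is easy to misread: sendInterval={{metrics_report_interval}}000 is plain text substitution, so the trailing "000" turns a seconds value into milliseconds. A minimal sketch with made-up values (the real ones come from the cluster configuration):

    # Illustrative stand-ins for the template variables.
    metrics_collection_period = 60   # seconds; becomes *.period
    metrics_report_interval = 10     # seconds; sink flush interval

    # "{{metrics_report_interval}}000" renders as "10000", i.e. seconds * 1000.
    send_interval_ms = int(str(metrics_report_interval) + "000")
    assert send_interval_ms == metrics_report_interval * 1000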

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..0a03d17
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2
@@ -0,0 +1,81 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+# Run all checks
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
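
Note that check_disks runs inside a command substitution, so its "exit 2" only sets $? for that subshell; the wrapper script itself always exits 0, and callers must classify the host from the stdout prefix. A hedged Python 2 sketch of such a caller (the install path is illustrative):

    import subprocess

    # Illustrative path; Ambari renders the template into the Hadoop conf dir.
    output = subprocess.check_output(["/etc/hadoop/conf/health_check"]).strip()

    if output.startswith("ERROR"):
        # Some ext3 mount from /etc/fstab is unmounted "(u)" or read-only "(ro)".
        print("host unhealthy: %s" % output)
    else:
        print("host healthy: %s" % output)  # e.g. "OK: disks ok,"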

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..4a9e713
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,21 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2
new file mode 100644
index 0000000..15034d6
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2
@@ -0,0 +1,24 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+[network_topology]
+{% for host in all_hosts %}
+{% if host in slave_hosts %}
+{{host}}={{all_racks[loop.index-1]}}
+{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
+{% endif %}
+{% endfor %}
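
This template depends on all_hosts, all_racks and all_ipv4_ips being parallel lists, which is why it indexes them with loop.index-1 (loop.index is 1-based). A minimal render with stand-in data, using stock jinja2 instead of the bundled ambari_jinja2:

    from jinja2 import Template

    template = Template(
        "[network_topology]\n"
        "{% for host in all_hosts %}"
        "{% if host in slave_hosts %}"
        "{{host}}={{all_racks[loop.index-1]}}\n"
        "{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}\n"
        "{% endif %}"
        "{% endfor %}")

    print(template.render(
        all_hosts=["nn1.example.com", "dn1.example.com"],
        slave_hosts=["dn1.example.com"],
        all_racks=["/rack-a", "/rack-b"],
        all_ipv4_ips=["10.0.0.1", "10.0.0.2"]))
    # [network_topology]
    # dn1.example.com=/rack-b
    # 10.0.0.2=/rack-b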

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
deleted file mode 100644
index 8bae9e6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.hook import Hook
-from shared_initialization import link_configs
-from shared_initialization import setup_config
-from shared_initialization import setup_stack_symlinks
-
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_stack_symlinks(self.stroutfile)
-    setup_config()
-
-    link_configs(self.stroutfile)
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
deleted file mode 100644
index bf9d79b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.script import Script
-from resource_management.libraries.script.script import get_config_lock_file
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version, get_major_version
-from string import lower
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-major_stack_version = get_major_version(stack_version_formatted)
-
-# service name
-service_name = config['serviceName']
-
-# logsearch configuration
-logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf"
-
-agent_cache_dir = config['hostLevelParams']['agentCacheDir']
-service_package_folder = config['commandParams']['service_package_folder']
-logsearch_service_name = service_name.lower().replace("_", "-")
-logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
-logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
-logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
-
-# default hadoop params
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-
-mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
-versioned_stack_root = '/usr/hdp/current'
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-
-link_configs_lock_file = get_config_lock_file()
-stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
-
-upgrade_suspended = default("/roleParams/upgrade_suspended", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 67c3ba8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,132 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-import ambari_simplejson as json
-from ambari_jinja2 import Environment as JinjaEnvironment
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Directory, File
-from resource_management.core.source import InlineTemplate, Template
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.script import Script
-
-
-def setup_stack_symlinks(struct_out_file):
-  """
-  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
-  stack version, such as "2.3". This should always be called after a component has been
-  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
-  interact with this since it's done via a custom command and will not trigger this hook.
-  :return:
-  """
-  import params
-  if params.upgrade_suspended:
-    Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
-    return
-
-  if params.host_sys_prepped:
-    Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host")
-    return
-
-  # get the packages which the stack-select tool should be used on
-  stack_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
-  if stack_packages is None:
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for package in stack_packages:
-      stack_select.select(package, json_version)
-
-
-def setup_config():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-
-  is_hadoop_conf_dir_present = False
-  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
-    is_hadoop_conf_dir_present = True
-  else:
-    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
-
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config directory exists
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
-
-  Directory(params.logsearch_logfeeder_conf,
-            mode=0755,
-            cd_access='a',
-            create_parents=True
-            )
-
-  if params.logsearch_config_file_exists:
-    File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
-         content=Template(params.logsearch_config_file_path,extra_imports=[default])
-         )
-  else:
-    Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
-
-
-def load_version(struct_out_file):
-  """
-  Load version from file.  Made a separate method for testing
-  """
-  try:
-    with open(struct_out_file, 'r') as fp:
-      json_info = json.load(fp)
-
-    return json_info['version']
-  except (IOError, KeyError, TypeError):
-    return None
-
-
-def link_configs(struct_out_file):
-  """
-  Links configs, only on a fresh install of HDP-2.3 and higher
-  """
-  import params
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for k, v in conf_select.get_package_dirs().iteritems():
-      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
deleted file mode 100644
index a6b8b77..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-newUid=$3
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-if [ -z $2 ]; then
-  test $(id -u ${username} 2>/dev/null)
-  if [ $? -ne 1 ]; then
-   newUid=`id -u ${username}`
-  else
-   find_available_uid
-  fi
-  echo $newUid
-  exit 0
-else
-  find_available_uid
-fi
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-set -e
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
-exit 0
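
For contrast, find_available_uid above greps each candidate number against the whole of /etc/passwd, so any field containing the same digits (a GID, a home directory name) can mask a UID that is actually free. A stricter sketch of the same scan in Python, for illustration only:

    import pwd

    def find_available_uid(low=1001, high=2000):
        # Compare against actual UID fields rather than substring-matching the file.
        used = set(entry.pw_uid for entry in pwd.getpwall())
        for uid in range(low, high + 1):
            if uid not in used:
                return uid
        raise RuntimeError("no free uid between %d and %d" % (low, high))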

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py
deleted file mode 100644
index c34be0b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-
-    setup_users()
-    if params.has_namenode or params.dfs_type == 'HCFS':
-      setup_hadoop_env()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-


[26/50] [abbrv] ambari git commit: AMBARI-22112 Log Search UI: refine Capture feature. (Istvan Tobias via ababiichuk)

Posted by jl...@apache.org.
AMBARI-22112 Log Search UI: refine Capture feature. (Istvan Tobias via ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b0421821
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b0421821
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b0421821

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b0421821ea66d5cfda7a5f628890350218595986
Parents: fc58250
Author: Istvan Tobias <>
Authored: Thu Oct 5 12:32:50 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Thu Oct 5 12:32:50 2017 +0300

----------------------------------------------------------------------
 .../filters-panel/filters-panel.component.less   |  2 +-
 .../src/app/services/filtering.service.ts        | 19 +++++++++++++++++--
 2 files changed, 18 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b0421821/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.less
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.less b/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.less
index 4607d34..aeb6911 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.less
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.less
@@ -64,6 +64,6 @@
   }
 
   /deep/ .stop-icon {
-    color: @error-color;
+    color: @exclude-color;
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b0421821/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
index 9e3a7d2..c3177cc 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-import {Injectable} from '@angular/core';
+import {Injectable, Input} from '@angular/core';
 import {FormControl, FormGroup} from '@angular/forms';
 import {Response} from '@angular/http';
 import {Subject} from 'rxjs/Subject';
@@ -78,6 +78,14 @@ export class FilteringService {
 
   timeZone: string = this.defaultTimeZone;
 
+  /**
+   * A configurable property to indicate the maximum capture time in milliseconds.
+   * @type {number}
+   * @default 600000 (10 minutes)
+   */
+  @Input()
+  maximumCaptureTimeLimit: number = 600000;
+
   filters = {
     clusters: {
       label: 'filter.clusters',
@@ -419,7 +427,13 @@ export class FilteringService {
 
   startCaptureTimer(): void {
     this.startCaptureTime = new Date().valueOf();
-    Observable.timer(0, 1000).takeUntil(this.stopTimer).subscribe(seconds => this.captureSeconds = seconds);
+    const maxCaptureTimeInSeconds = this.maximumCaptureTimeLimit / 1000;
+    Observable.timer(0, 1000).takeUntil(this.stopTimer).subscribe(seconds => {
+      this.captureSeconds = seconds;
+      if (this.captureSeconds >= maxCaptureTimeInSeconds) {
+        this.stopCaptureTimer();
+      }
+    });
   }
 
   stopCaptureTimer(): void {
@@ -427,6 +441,7 @@ export class FilteringService {
     this.stopCaptureTime = new Date().valueOf();
     this.captureSeconds = 0;
     this.stopTimer.next();
+    this.setCustomTimeRange(this.startCaptureTime, this.stopCaptureTime);
     Observable.timer(0, 1000).takeUntil(this.stopAutoRefreshCountdown).subscribe(seconds => {
       this.autoRefreshRemainingSeconds = autoRefreshIntervalSeconds - seconds;
       if (!this.autoRefreshRemainingSeconds) {
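
Stripped of the RxJS plumbing, the behavior the patch adds is "tick once a second, but stop yourself once a maximum elapsed time is reached". A framework-free sketch of that pattern (the names mirror the patch, but the code is illustrative):

    import time

    MAXIMUM_CAPTURE_TIME_LIMIT_MS = 600000  # the new default: 10 minutes

    def capture_timer(on_tick, max_ms=MAXIMUM_CAPTURE_TIME_LIMIT_MS):
        """Invoke on_tick(seconds) every second, auto-stopping at the limit."""
        max_seconds = max_ms / 1000
        seconds = 0
        while True:
            on_tick(seconds)
            if seconds >= max_seconds:
                break  # the point at which the patch calls stopCaptureTimer()
            time.sleep(1)
            seconds += 1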


[28/50] [abbrv] ambari git commit: AMBARI-22145. Secure cluster deploy failing with "While building the CHECK_KEYTABS custom command for KERBEROS/KERBEROS_CLIENT, there were no healthy eligible hosts" (amagyar)

Posted by jl...@apache.org.
AMBARI-22145. Secure cluster deploy failing with "While building the CHECK_KEYTABS custom command for KERBEROS/KERBEROS_CLIENT, there were no healthy eligible hosts" (amagyar)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ed378b76
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ed378b76
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ed378b76

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: ed378b76498e17d752353153b2596bbb0eb810d9
Parents: 2512dc8
Author: Attila Magyar <am...@hortonworks.com>
Authored: Thu Oct 5 17:19:31 2017 +0200
Committer: Attila Magyar <am...@hortonworks.com>
Committed: Thu Oct 5 17:19:31 2017 +0200

----------------------------------------------------------------------
 .../server/controller/KerberosHelperImpl.java       | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ed378b76/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
index 67b08fd..b691968 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/KerberosHelperImpl.java
@@ -3433,18 +3433,20 @@ public class KerberosHelperImpl implements KerberosHelper {
       Collection<ServiceComponentHost> filteredComponents = filterServiceComponentHostsForHosts(
           new ArrayList<>(serviceComponentHosts), getHostsWithValidKerberosClient(cluster));
 
-      List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
-      Map<String, String> requestParams = new HashMap<>();
-      List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
-      RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
-      requestResourceFilters.add(reqResFilter);
+      if (!filteredComponents.isEmpty()) {
+        List<String> hostsToUpdate = createUniqueHostList(filteredComponents, Collections.singleton(HostState.HEALTHY));
+        Map<String, String> requestParams = new HashMap<>();
+        List<RequestResourceFilter> requestResourceFilters = new ArrayList<>();
+        RequestResourceFilter reqResFilter = new RequestResourceFilter(Service.Type.KERBEROS.name(), Role.KERBEROS_CLIENT.name(), hostsToUpdate);
+        requestResourceFilters.add(reqResFilter);
 
-      ActionExecutionContext actionExecContext = new ActionExecutionContext(
+        ActionExecutionContext actionExecContext = new ActionExecutionContext(
           cluster.getClusterName(),
           CHECK_KEYTABS,
           requestResourceFilters,
           requestParams);
-      customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestParams, null);
+        customCommandExecutionHelper.addExecutionCommandsToStage(actionExecContext, stage, requestParams, null);
+      }
       RoleGraph roleGraph = roleGraphFactory.createNew(roleCommandOrder);
       roleGraph.build(stage);
 


[06/50] [abbrv] ambari git commit: AMBARI-22095 Make hooks stack agnostic (dsen)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100644
index 48dc4b0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,256 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from resource_management.core.resources.jcepolicyinfo import JcePolicyInfo
-
-from resource_management import *
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  Execute(("setenforce","0"),
-          only_if="test -f /selinux/enforce",
-          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
-          sudo=True,
-  )
-
-  #directories
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    Directory(params.hdfs_log_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              mode=0775,
-              cd_access='a',
-    )
-    if params.has_namenode:
-      Directory(params.hadoop_pid_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group='root',
-              cd_access='a',
-      )
-    Directory(params.hadoop_tmp_dir,
-              create_parents = True,
-              owner=params.hdfs_user,
-              cd_access='a',
-              )
-  #files
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-      
-    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
-    if params.sysprep_skip_copy_fast_jar_hdfs:
-      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
-    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-      # for source-code of jar goto contrib/fast-hdfs-resource
-      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
-           mode=0644,
-           content=StaticFile("fast-hdfs-resource.jar")
-      )
-      
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-           owner=tc_owner,
-           content=Template('commons-logging.properties.j2')
-      )
-
-      health_check_template_name = "health_check"
-      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
-           owner=tc_owner,
-           content=Template(health_check_template_name + ".j2")
-      )
-
-      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-      if (params.log4j_props != None):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-             content=InlineTemplate(params.log4j_props)
-        )
-      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-        )
-
-      if params.hadoop_metrics2_properties_content:
-        File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-             owner=params.hdfs_user,
-             group=params.user_group,
-             content=InlineTemplate(params.hadoop_metrics2_properties_content)
-             )
-      else:
-        File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-             owner=params.hdfs_user,
-             group=params.user_group,
-             content=Template("hadoop-metrics2.properties.j2")
-             )
-
-    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
-       create_dirs()
-
-    create_microsoft_r_dir()
-
-
-def setup_configs():
-  """
-  Creates configs for services HDFS mapred
-  """
-  import params
-
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    if os.path.exists(params.hadoop_conf_dir):
-      File(params.task_log4j_properties_location,
-           content=StaticFile("task-log4j.properties"),
-           mode=0755
-      )
-
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-           owner=params.hdfs_user,
-           group=params.user_group
-      )
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-      File(os.path.join(params.hadoop_conf_dir, 'masters'),
-                owner=params.hdfs_user,
-                group=params.user_group
-      )
-
-def create_javahome_symlink():
-  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Directory("/usr/jdk64/",
-         create_parents = True,
-    )
-    Link("/usr/jdk/jdk1.6.0_31",
-         to="/usr/jdk64/jdk1.6.0_31",
-    )
-
-def create_dirs():
-   import params
-   params.HdfsResource(params.hdfs_tmp_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.hdfs_user,
-                       mode=0777
-   )
-   params.HdfsResource(params.smoke_hdfs_user_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
-   )
-   params.HdfsResource(None,
-                      action="execute"
-   )
-
-def create_microsoft_r_dir():
-  import params
-  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
-    directory = '/user/RevoShare'
-    try:
-      params.HdfsResource(directory,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.hdfs_user,
-                          mode=0777)
-      params.HdfsResource(None, action="execute")
-    except Exception as exception:
-      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
-
-def setup_unlimited_key_jce_policy():
-  """
-  Sets up the unlimited key JCE policy if needed (and sets up the Ambari JCE as well if Ambari and the stack use different JDKs).
-  """
-  import params
-  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
-  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
-    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
-
-def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
-  """
-  Sets up the unlimited key JCE policy if needed.
-
-  The following criteria must be met:
-
-    * The cluster has not been previously prepared (sys preped) - cluster-env/sysprep_skip_setup_jce = False
-    * Ambari is managing the host's JVM - /hostLevelParams/jdk_name is set
-    * Either security is enabled OR a service requires it - /hostLevelParams/unlimited_key_jce_required = True
-    * The unlimited key JCE policy has not already been installed
-
-  If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs
-
-    1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the
-        Ambari agent's temporary directory
-    2. The existing JCE policy JAR files are deleted
-    3. The downloaded ZIP file is unzipped into the proper JCE policy directory
-
-  :return: None
-  """
-  import params
-
-  if params.sysprep_skip_setup_jce:
-    Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
-
-  elif not custom_jdk_name:
-    Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
-
-  elif not params.unlimited_key_jce_required:
-    Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
-
-  else:
-    jcePolicyInfo = JcePolicyInfo(custom_java_home)
-
-    if jcePolicyInfo.is_unlimited_key_jce_policy():
-      Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
-
-    elif custom_jce_name is None:
-      raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
-
-    else:
-      Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
-
-      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
-      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
-      java_security_dir = format("{custom_java_home}/jre/lib/security")
-
-      Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
-      Directory(params.artifact_dir, create_parents=True)
-      File(jce_zip_target, content=DownloadSource(jce_zip_source))
-
-      Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir))
-      File(format("{java_security_dir}/US_export_policy.jar"), action="delete")
-      File(format("{java_security_dir}/local_policy.jar"), action="delete")
-
-      Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir))
-      extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir)
-      Execute(extract_cmd,
-              only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"),
-              path=['/bin/', '/usr/bin'],
-              sudo=True
-              )

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100644
index 2197ba5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100644
index 1adba80..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100644
index 2cd9aa8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,107 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name={{hostname}}
-*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
-*.sink.timeline.protocol={{metric_collector_protocol}}
-*.sink.timeline.port={{metric_collector_port}}
-*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
-*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
-
-# HTTPS properties
-*.sink.timeline.truststore.path = {{metric_truststore_path}}
-*.sink.timeline.truststore.type = {{metric_truststore_type}}
-*.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% if is_nn_client_port_configured %}
-# Namenode rpc ports customization
-namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
-{% endif %}
-{% if is_nn_dn_port_configured %}
-namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
-{% endif %}
-{% if is_nn_healthcheck_port_configured %}
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
-{% endif %}
-
-{% endif %}
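
Two things worth noting in the template above: every property follows the hadoop-metrics2 syntax [prefix].[sink].[instance].[options], with the per-daemon prefixes (namenode, datanode, ...) pinning one Ganglia port per daemon; and *.sink.timeline.sendInterval appends a literal "000" to {{metrics_report_interval}}, which reads as a seconds-to-milliseconds conversion for the sink. A minimal render sketch of the timeline branch (values are illustrative assumptions):

    from jinja2 import Template

    src = ("*.period={{metrics_collection_period}}\n"
           "*.sink.timeline.sendInterval={{metrics_report_interval}}000\n"
           "datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}")
    print(Template(src).render(metrics_collection_period=10,
                               metrics_report_interval=60,
                               ams_collector_hosts="c6401.ambari.apache.org"))
    # *.period=10
    # *.sink.timeline.sendInterval=60000
    # datanode.sink.timeline.collector.hosts=c6401.ambari.apache.org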

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index 0a03d17..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-# Run all checks
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
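
The health check above boils down to a single disk check: an ext3 mount listed in /etc/fstab is flagged "(u)" if it is absent from /proc/mounts (with /mnt exempt) and "(ro)" if it was remounted read-only; any flag makes the script print an ERROR line, though note the wrapper always exits 0, so callers have to parse the ERROR/OK prefix rather than the exit code. A rough Python equivalent, illustrative only:

    def check_disks():
        """Rough port of the shell check_disks above (illustrative only)."""
        ext3_mounts = [line.split()[1] for line in open('/etc/fstab')
                       if len(line.split()) >= 3 and 'ext3' in line.split()[2]]
        mounts = {line.split()[1]: line.split()
                  for line in open('/proc/mounts') if line.split()}
        flags = []
        for m in ext3_mounts:
            if m not in mounts:
                if m != '/mnt':
                    flags.append(m + '(u)')   # in fstab but not mounted
            elif mounts[m][3].startswith('ro,'):
                flags.append(m + '(ro)')      # remounted read-only
        return (0, 'disks ok') if not flags else (2, ' '.join(flags))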

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index 4a9e713..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2
deleted file mode 100644
index 15034d6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-[network_topology]
-{% for host in all_hosts %}
-{% if host in slave_hosts %}
-{{host}}={{all_racks[loop.index-1]}}
-{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
-{% endif %}
-{% endfor %}
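
The template above emits the rack topology file: each slave host contributes two mappings, one by hostname and one by IPv4 address. Note that loop.index counts iterations over all_hosts, so all_racks and all_ipv4_ips must be parallel to all_hosts, not to slave_hosts. An illustrative render (host, rack, and IP values are assumptions):

    from jinja2 import Template

    src = ("[network_topology]\n"
           "{% for host in all_hosts %}{% if host in slave_hosts %}"
           "{{host}}={{all_racks[loop.index-1]}}\n"
           "{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}\n"
           "{% endif %}{% endfor %}")
    print(Template(src).render(all_hosts=["c6401", "c6402"],
                               slave_hosts=["c6402"],
                               all_racks=["/default-rack", "/rack-1"],
                               all_ipv4_ips=["10.0.0.1", "10.0.0.2"]))
    # [network_topology]
    # c6402=/rack-1
    # 10.0.0.2=/rack-1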

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
index 6363c59..df7f0cd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
@@ -59,7 +59,7 @@ class ECSClient(Script):
 
     File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
            mode=0644,
-           content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar")
+           content=StaticFile("/var/lib/ambari-agent/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar")
     )
 
   def setup_hadoop_env(self, env):
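
This hunk is the one modification (rather than deletion) in this stretch of the diff: the ECS client now pulls fast-hdfs-resource.jar from the shared stack-hooks path in the agent cache instead of the HDP-2.0.6-specific hooks path, consistent with the per-stack hook scripts and templates being removed throughout this commit.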

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
deleted file mode 100644
index 8bae9e6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.hook import Hook
-from shared_initialization import link_configs
-from shared_initialization import setup_config
-from shared_initialization import setup_stack_symlinks
-
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_stack_symlinks(self.stroutfile)
-    setup_config()
-
-    link_configs(self.stroutfile)
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
deleted file mode 100644
index 34dfe70..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.script import Script
-from resource_management.libraries.script.script import get_config_lock_file
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version
-from string import lower
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-# service name
-service_name = config['serviceName']
-
-# logsearch configuration
-logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf"
-
-agent_cache_dir = config['hostLevelParams']['agentCacheDir']
-service_package_folder = config['commandParams']['service_package_folder']
-logsearch_service_name = service_name.lower().replace("_", "-")
-logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
-logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
-logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
-
-# default hadoop params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_conf_empty_dir = None
-
-versioned_stack_root = format('{stack_root}/current')
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-
-link_configs_lock_file = get_config_lock_file()
-stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
-
-upgrade_suspended = default("/roleParams/upgrade_suspended", False)
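
Most of the lookups above go through default(path, value), which resolves a '/'-separated path against the parsed command JSON and falls back when any key along the path is missing. A minimal re-implementation for illustration only; the real helper lives in resource_management.libraries.functions.default:

    from resource_management.libraries.script.script import Script

    def default(path, default_value):
        """Illustrative sketch: walk the command JSON by '/'-separated keys."""
        node = Script.get_config()
        for key in path.strip('/').split('/'):
            if not isinstance(node, dict) or key not in node:
                return default_value
            node = node[key]
        return node

    # e.g. default("/roleParams/upgrade_suspended", False) returns False
    # when the command JSON carries no roleParams section.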

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 0ffd5a5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,140 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-import ambari_simplejson as json
-from ambari_jinja2 import Environment as JinjaEnvironment
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Directory, File
-from resource_management.core.source import InlineTemplate, Template
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.script import Script
-
-
-def setup_stack_symlinks(struct_out_file):
-  """
-  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
-  stack version, such as "2.3". This should always be called after a component has been
-  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
-  interact with this since it's done via a custom command and will not trigger this hook.
-  :return:
-  """
-  import params
-  if params.upgrade_suspended:
-    Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
-    return
-
-  if params.host_sys_prepped:
-    Logger.warning("Skipping running stack-selector-tool becase this is a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.")
-    return
-
-  # get the packages which the stack-select tool should be used on
-  stack_select_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
-  if stack_select_packages is None:
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for package in stack_select_packages:
-      stack_select.select(package, json_version)
-
-
-def setup_config():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-
-  is_hadoop_conf_dir_present = False
-  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
-    is_hadoop_conf_dir_present = True
-  else:
-    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
-
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config directory exists
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
-
-  Directory(params.logsearch_logfeeder_conf,
-            mode=0755,
-            cd_access='a',
-            create_parents=True
-            )
-
-  if params.logsearch_config_file_exists:
-    File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
-         content=Template(params.logsearch_config_file_path,extra_imports=[default])
-         )
-  else:
-    Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
-
-
-def load_version(struct_out_file):
-  """
-  Load version from file.  Made a separate method for testing
-  """
-  json_version = None
-  try:
-    if os.path.exists(struct_out_file):
-      with open(struct_out_file, 'r') as fp:
-        json_info = json.load(fp)
-        json_version = json_info['version']
-  except:
-    pass
-
-  return json_version
-  
-
-def link_configs(struct_out_file):
-  """
-  Link configs, but only on a fresh install of HDP-2.3 or higher
-  """
-  import params
-
-  if not Script.is_stack_greater_or_equal("2.3"):
-    Logger.info("Can only link configs for HDP-2.3 and higher.")
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for k, v in conf_select.get_package_dirs().iteritems():
-      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
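
Both setup_stack_symlinks and link_configs above rely on the same serialization pattern: wrap the critical section in FcntlBasedProcessLock so that, under parallel command execution, only one process per host runs stack-select or the conf-symlink conversion at a time, with skip_fcntl_failures degrading to no locking on filesystems without fcntl support. A usage sketch (the lock file path is a hypothetical example):

    from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock

    lock_file = "/var/lib/ambari-agent/tmp/example_lock"  # hypothetical path
    with FcntlBasedProcessLock(lock_file, enabled=True, skip_fcntl_failures=True):
        pass  # critical section, e.g. stack_select.select(package, version)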

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
deleted file mode 100644
index 08542c4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-set -e
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
-exit 0
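
Two quirks in the script above are worth flagging: grep -q $i /etc/passwd matches the candidate number anywhere on a line (a UID, a GID, even GECOS text), so usable UIDs can be skipped; and newUid is never initialized, so when no UID is free the [ $newUid -eq 0 ] guard expands to a test error instead of printing the failure message. A rough Python port of the search, illustrative only:

    def find_available_uid(passwd_path='/etc/passwd'):
        """Illustrative port: first UID in 1001..2000 not mentioned in passwd."""
        lines = open(passwd_path).read().splitlines()
        for uid in range(1001, 2001):
            # Substring check mirrors the shell's grep of the bare number.
            if not any(str(uid) in line for line in lines):
                return uid
        return None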

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
deleted file mode 100644
index c34be0b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-
-    setup_users()
-    if params.has_namenode or params.dfs_type == 'HCFS':
-      setup_hadoop_env()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
deleted file mode 100644
index cee0519..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
+++ /dev/null
@@ -1,254 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import collections
-import re
-import os
-import ast
-
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.expect import expect
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from ambari_commons.os_check import OSCheck
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-jdk_location = config['hostLevelParams']['jdk_location']
-
-sudo = AMBARI_SUDO_BINARY
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
-version = default("/commandParams/version", None)
-# Handle upgrade and downgrade
-if (upgrade_type is not None) and version:
-  stack_version_formatted = format_stack_version(version)
-
-ambari_java_home = default("/commandParams/ambari_java_home", None)
-ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-secure_dn_ports_are_in_use = False
-
-def get_port(address):
-  """
-  Extracts the port from an address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def is_secure_port(port):
-  """
-  Returns True if the port is root-owned on *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-# hadoop default params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-
-# upgrades would cause these directories to have a version instead of "current"
-# which would cause a lot of problems when writing out hadoop-env.sh; instead
-# force the use of "current" in the hook
-hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hadoop_home = stack_select.get_hadoop_dir("home")
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-
-hadoop_conf_empty_dir = None
-hadoop_secure_dn_user = hdfs_user
-hadoop_dir = "/etc/hadoop"
-versioned_stack_root = format('{stack_root}/current')
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
-is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
-
-if not security_enabled:
-  hadoop_secure_dn_user = '""'
-else:
-  dfs_dn_port = get_port(dfs_dn_addr)
-  dfs_dn_http_port = get_port(dfs_dn_http_addr)
-  dfs_dn_https_port = get_port(dfs_dn_https_addr)
-  # Avoid the datanode failing to start as a plain user because it would bind root-owned ports
-  if dfs_http_policy == "HTTPS_ONLY":
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
-  elif dfs_http_policy == "HTTP_AND_HTTPS":
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
-  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
-  if secure_dn_ports_are_in_use:
-    hadoop_secure_dn_user = hdfs_user
-  else:
-    hadoop_secure_dn_user = '""'
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-falcon_user = config['configurations']['falcon-env']["falcon_user"]
-ranger_user = config['configurations']['ranger-env']["ranger_user"]
-zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
-zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-has_namenode = not len(namenode_host) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = 'tez-site' in config['configurations']
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_falcon_server_hosts = not len(falcon_server_hosts) == 0
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-has_zeppelin_master = not len(zeppelin_master_hosts) == 0
-stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-
-if has_namenode or dfs_type == 'HCFS':
-    hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-    hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-ranger_group = config['configurations']['ranger-env']['ranger_group']
-dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
-
-sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = {}
-
-#Append new user-group mapping to the dict
-try:
-  user_group_map = ast.literal_eval(config['hostLevelParams']['user_groups'])
-  for key in user_group_map.iterkeys():
-    user_to_groups_dict[key] = user_group_map[key]
-except ValueError:
-  print('User Group mapping (user_groups) is missing in the hostLevelParams')
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
-
-# if NN HA is enabled on a secure cluster, access ZooKeeper securely
-if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
-  hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client")
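
A usage sketch for the get_port / is_secure_port helpers defined above (addresses are illustrative); these two drive secure_dn_ports_are_in_use and, in turn, whether hadoop_secure_dn_user is the real HDFS user or the literal '""':

    print(get_port("0.0.0.0:1019"))                  # 1019
    print(get_port("https://nn.example.com:50470"))  # 50470
    print(get_port(None))                            # None
    print(is_secure_port(1019))                      # True, < 1024
    print(is_secure_port(50010))                     # False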

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
deleted file mode 100644
index dbd1727..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
+++ /dev/null
@@ -1,239 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import getpass
-import tempfile
-from copy import copy
-from resource_management.libraries.functions.version import compare_versions
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  should_create_users_and_groups = False
-  if params.host_sys_prepped:
-    should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
-  else:
-    should_create_users_and_groups = not params.ignore_groupsusers_create
-
-  if should_create_users_and_groups:
-    for group in params.group_list:
-      Group(group,
-      )
-
-    for user in params.user_list:
-      User(user,
-          gid = params.user_to_gid_dict[user],
-          groups = params.user_to_groups_dict[user],
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-      )
-
-    if params.override_uid == "true":
-      set_uid(params.smoke_user, params.smoke_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for smoke user as override_uid is not enabled')
-  else:
-    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
-    pass
-
-
-  if params.has_hbase_masters:
-    Directory (params.hbase_tmp_dir,
-               owner = params.hbase_user,
-               mode=0775,
-               create_parents = True,
-               cd_access="a",
-    )
-    if params.override_uid == "true":
-      set_uid(params.hbase_user, params.hbase_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for hbase user as override_uid is not enabled')
-
-  if should_create_users_and_groups:
-    if params.has_namenode:
-      create_dfs_cluster_admins()
-    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
-      create_tez_am_view_acls()
-  else:
-    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
-
-def create_dfs_cluster_admins():
-  """
-  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
-
-  User(params.hdfs_user,
-    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-  )
-
-def create_tez_am_view_acls():
-
-  """
-  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  if not params.tez_am_view_acls.startswith("*"):
-    create_users_and_groups(params.tez_am_view_acls)
-
-def create_users_and_groups(user_and_groups):
-
-  import params
-
-  parts = re.split('\s', user_and_groups)
-  if len(parts) == 1:
-    parts.append("")
-
-  users_list = parts[0].strip(",").split(",") if parts[0] else []
-  groups_list = parts[1].strip(",").split(",") if parts[1] else []
-
-  if users_list:
-    User(users_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-    )
-
-  if groups_list:
-    Group(copy(groups_list),
-    )
-  return groups_list
-    
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
-          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-    
-def setup_hadoop_env():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    # create /etc/hadoop
-    Directory(params.hadoop_dir, mode=0755)
-
-    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
-    if Script.is_stack_less_than("2.2"):
-      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
-        group=params.user_group )
-
-      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}"))
-
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
-        group=params.user_group,
-        content=InlineTemplate(params.hadoop_env_sh_template))
-
-    # Create tmp dir for java.io.tmpdir
-    # Handle a situation when /tmp is set to noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=01777
-    )
-
-def setup_java():
-  """
-  Install jdk using specific params.
-  Install ambari jdk as well if the stack and ambari jdk are different.
-  """
-  import params
-  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
-  if params.ambari_java_home and params.ambari_java_home != params.java_home:
-    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
-
-def __setup_java(custom_java_home, custom_jdk_name):
-  """
-  Installs jdk using specific params, that comes from ambari-server
-  """
-  import params
-  java_exec = format("{custom_java_home}/bin/java")
-
-  if not os.path.isfile(java_exec):
-    if not params.jdk_name: # if custom jdk is used.
-      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
-
-    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
-    java_dir = os.path.dirname(params.java_home)
-
-    Directory(params.artifact_dir,
-              create_parents = True,
-              )
-
-    File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
-         not_if = format("test -f {jdk_curl_target}")
-         )
-
-    File(jdk_curl_target,
-         mode = 0755,
-         )
-
-    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
-
-    try:
-      if params.jdk_name.endswith(".bin"):
-        chmod_cmd = ("chmod", "+x", jdk_curl_target)
-        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-      elif params.jdk_name.endswith(".gz"):
-        chmod_cmd = ("chmod","a+x", java_dir)
-        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-
-      Directory(java_dir
-                )
-
-      Execute(chmod_cmd,
-              sudo = True,
-              )
-
-      Execute(install_cmd,
-              )
-
-    finally:
-      Directory(tmp_java_dir, action="delete")
-
-    File(format("{custom_java_home}/bin/java"),
-         mode=0755,
-         cd_access="a",
-         )
-    Execute(('chmod', '-R', '755', params.java_home),
-            sudo = True,
-            )
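
A reading note on setup_java / __setup_java above: although __setup_java takes custom_java_home and custom_jdk_name, several branches still reference params.jdk_name and params.java_home (the missing-JDK Fail check, the java_dir computation, the .bin/.gz suffix tests, and the final recursive chmod), so for the second, Ambari-only JDK the install directory and archive-type checks are still derived from the stack JDK settings. Worth keeping in mind when tracing where that JDK actually lands.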

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100644
index ce17776..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
-
-class BeforeInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-    
-    install_repos()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeInstallHook().execute()
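
Note the explicit self.run_custom_hook('before-ANY') at the top of hook(): before-INSTALL chains before-ANY itself, so the user/group, hadoop-env, and JDK setup from that phase is guaranteed to have run before repositories and packages are touched; the ordering is made explicit in the hook code rather than left implicit.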

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
deleted file mode 100644
index 50c5a40..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management.core.system import System
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import default, format
-from resource_management.libraries.functions.expect import expect
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-
-# repo templates
-repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
-has_hs = not len(hs_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-has_falcon_server = not len(falcon_host) == 0
-has_tez = 'tez-site' in config['configurations']
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-repo_file = default("/repositoryFile", None)

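For readers skimming the deleted params script above, the pattern is worth spelling out: default("/clusterHostInfo/...", []) lookups followed by has_* presence flags. Below is a minimal standalone sketch; the default() stand-in takes the config dict explicitly (the real helper reads the command JSON itself), and the command fragment is fabricated.

def default(path, fallback, config):
    # Walk a "/"-separated path through nested dicts; fall back when any key is absent.
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

command = {"clusterHostInfo": {"namenode_host": ["c6401.ambari.apache.org"]}}

namenode_host = default("/clusterHostInfo/namenode_host", [], command)
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [], command)

# Presence flags, mirroring the has_* booleans above.
has_namenode = len(namenode_host) > 0
has_ganglia_server = len(ganglia_server_hosts) > 0
print("has_namenode=%s has_ganglia_server=%s" % (has_namenode, has_ganglia_server))
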
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
deleted file mode 100644
index 357bc62..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
+++ /dev/null
@@ -1,76 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.resources.repository import Repository
-from resource_management.libraries.functions.repository_util import create_repo_files, CommandRepository
-from resource_management.core.logger import Logger
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-# components_list = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  if 0 == len(repo_dicts):
-    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
-  else:
-    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
-
-  for repo in repo_dicts:
-    if not 'baseUrl' in repo:
-      repo['baseUrl'] = None
-    if not 'mirrorsList' in repo:
-      repo['mirrorsList'] = None
-    
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-    
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
-
-def install_repos():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
-
-  # use this newer way of specifying repositories, if available
-  if params.repo_file is not None:
-    create_repo_files(template, CommandRepository(params.repo_file))
-    return
-
-  _alter_repo("create", params.repo_info, template)
-
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)

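The heart of the deleted _alter_repo() is JSON normalization before each entry is handed to the Repository resource. A minimal standalone sketch follows, with a fabricated repo_string (the real one arrives in command params):

import json

repo_string = json.dumps([{
    "baseUrl": "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0",
    "osType": "centos6",
    "repoId": "HDP-2.0.6",
    "repoName": "HDP",
}])

repo_dicts = json.loads(repo_string)
if not isinstance(repo_dicts, list):  # a single object is also tolerated
    repo_dicts = [repo_dicts]

for repo in repo_dicts:
    # Normalize optional keys so downstream code can pass them through unconditionally.
    repo.setdefault("baseUrl", None)
    repo.setdefault("mirrorsList", None)
    # Ubuntu repos use the repo name plus the "main" component postfix.
    ubuntu_components = [repo["repoName"]] + ["main"]
    print("%s %s %s" % (repo["repoId"], repo["baseUrl"], ubuntu_components))
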
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 1609050..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import stack_tools
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.core.resources.packaging import Package
-
-def install_packages():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  packages = ['unzip', 'curl']
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
-    packages.append(stack_selector_package)
-  Package(packages,
-          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-          retry_count=params.agent_stack_retry_count)

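install_packages() gates the stack selector package on the stack version. A toy rendering of that gate, with a simplistic comparator standing in for compare_versions() and an assumed selector package name:

def compare_versions(a, b):
    # Toy comparator: numeric dotted versions only; the real helper is more tolerant.
    to_tuple = lambda v: tuple(int(p) for p in v.split("."))
    return (to_tuple(a) > to_tuple(b)) - (to_tuple(a) < to_tuple(b))

stack_version_formatted = "2.5.0.0"
packages = ["unzip", "curl"]
if stack_version_formatted != "" and compare_versions(stack_version_formatted, "2.2") >= 0:
    packages.append("hdp-select")  # assumed name of the stack selector package
print(packages)  # ['unzip', 'curl', 'hdp-select']
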
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
deleted file mode 100644
index 14b9d99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

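The deleted before-RESTART hook carries no logic of its own; it simply re-runs the before-START hook. A toy registry makes that delegation pattern explicit (Hook and run_custom_hook() are Ambari's; the registry below is not):

HOOKS = {}

def register(phase):
    def wrap(fn):
        HOOKS[phase] = fn
        return fn
    return wrap

@register("before-START")
def before_start():
    print("running before-START tasks")

@register("before-RESTART")
def before_restart():
    HOOKS["before-START"]()  # delegate, like self.run_custom_hook('before-START')

before_restart()
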
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index 68aa96d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
-  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

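The guard in checkForFormat.sh, namely format only when every NameNode name dir is empty, reads more plainly in a few lines of Python (paths fabricated):

import os

name_dirs = ["/tmp/demo/nn1", "/tmp/demo/nn2"]  # stand-ins for dfs.namenode.name.dir
for d in name_dirs:
    if not os.path.isdir(d):
        os.makedirs(d)

non_empty = [d for d in name_dirs if os.listdir(d)]
if non_empty:
    print("Will not format; non-empty dirs: %s" % ", ".join(non_empty))
else:
    print("All name dirs empty; safe to run: hdfs namenode -format")
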
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar
deleted file mode 100644
index 6c993bf..0000000
Binary files a/ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar and /dev/null differ


[14/50] [abbrv] ambari git commit: AMBARI-22104. Refactor existing server side actions to use the common AbstractUpgradeServerAction. Remove unused imports (dlysnichenko)

Posted by jl...@apache.org.
AMBARI-22104. Refactor existing server side actions to use the common AbstractUpgradeServerAction. Remove unused imports (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/67396ba0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/67396ba0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/67396ba0

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 67396ba0404319c11094b7376956ab19cfe36341
Parents: 11707ba
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Tue Oct 3 20:47:36 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Tue Oct 3 20:47:36 2017 +0300

----------------------------------------------------------------------
 .../serveraction/upgrades/RangerWebAlertConfigActionTest.java     | 3 ---
 1 file changed, 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/67396ba0/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java
index 7a1831c..f9fc0aa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java
@@ -19,7 +19,6 @@
 package org.apache.ambari.server.serveraction.upgrades;
 
 
-import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 
@@ -49,8 +48,6 @@ import org.junit.runner.RunWith;
 import org.mockito.Mockito;
 import org.mockito.runners.MockitoJUnitRunner;
 
-import com.google.inject.Injector;
-
 import junit.framework.Assert;
 
 


[09/50] [abbrv] ambari git commit: AMBARI-22095 Make hooks stack agnostic (dsen)

Posted by jl...@apache.org.
AMBARI-22095 Make hooks stack agnostic (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5b36cdfd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5b36cdfd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5b36cdfd

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 5b36cdfd87b756eba922dfd1ac5419552f4d375f
Parents: 05c7067
Author: Dmytro Sen <ds...@apache.org>
Authored: Tue Oct 3 15:45:22 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Tue Oct 3 15:45:22 2017 +0300

----------------------------------------------------------------------
 ambari-agent/pom.xml                            |   4 +-
 .../src/main/python/ambari_agent/FileCache.py   |   5 +-
 .../test/python/ambari_agent/TestFileCache.py   |   4 +-
 ambari-server/pom.xml                           |   2 +
 ambari-server/src/main/assemblies/server.xml    |  20 +-
 .../actionmanager/ExecutionCommandWrapper.java  |   3 +-
 .../ambari/server/agent/HeartbeatMonitor.java   |   3 +-
 .../internal/UpgradeResourceProvider.java       |   3 +-
 .../ambari/server/stack/StackDirectory.java     |  29 +-
 .../ambari/server/stack/StackManager.java       |   5 +
 .../apache/ambari/server/stack/StackModule.java |   5 -
 .../apache/ambari/server/state/StackInfo.java   |   8 -
 .../python/ambari_server/resourceFilesKeeper.py |   7 +-
 .../python/ambari_server/serverConfiguration.py |   2 +-
 .../main/python/ambari_server/setupMpacks.py    |   2 +-
 .../main/resources/scripts/Ambaripreupload.py   |   4 +-
 .../scripts/post-user-creation-hook.sh          |   2 +-
 .../stack-hooks/after-INSTALL/scripts/hook.py   |  37 ++
 .../stack-hooks/after-INSTALL/scripts/params.py | 108 ++++++
 .../scripts/shared_initialization.py            | 132 +++++++
 .../before-ANY/files/changeToSecureUid.sh       |  64 ++++
 .../stack-hooks/before-ANY/scripts/hook.py      |  36 ++
 .../stack-hooks/before-ANY/scripts/params.py    | 254 +++++++++++++
 .../before-ANY/scripts/shared_initialization.py | 273 +++++++++++++
 .../stack-hooks/before-INSTALL/scripts/hook.py  |  37 ++
 .../before-INSTALL/scripts/params.py            | 115 ++++++
 .../scripts/repo_initialization.py              |  75 ++++
 .../scripts/shared_initialization.py            |  37 ++
 .../stack-hooks/before-RESTART/scripts/hook.py  |  29 ++
 .../before-START/files/checkForFormat.sh        |  65 ++++
 .../before-START/files/fast-hdfs-resource.jar   | Bin 0 -> 28296600 bytes
 .../before-START/files/task-log4j.properties    | 134 +++++++
 .../before-START/files/topology_script.py       |  66 ++++
 .../before-START/scripts/custom_extensions.py   | 173 +++++++++
 .../stack-hooks/before-START/scripts/hook.py    |  43 +++
 .../stack-hooks/before-START/scripts/params.py  | 380 +++++++++++++++++++
 .../before-START/scripts/rack_awareness.py      |  48 +++
 .../scripts/shared_initialization.py            | 256 +++++++++++++
 .../templates/commons-logging.properties.j2     |  43 +++
 .../templates/exclude_hosts_list.j2             |  21 +
 .../templates/hadoop-metrics2.properties.j2     | 107 ++++++
 .../before-START/templates/health_check.j2      |  81 ++++
 .../templates/include_hosts_list.j2             |  21 +
 .../templates/topology_mappings.data.j2         |  24 ++
 .../2.0.6/hooks/after-INSTALL/scripts/hook.py   |  37 --
 .../2.0.6/hooks/after-INSTALL/scripts/params.py | 108 ------
 .../scripts/shared_initialization.py            | 132 -------
 .../hooks/before-ANY/files/changeToSecureUid.sh |  64 ----
 .../HDP/2.0.6/hooks/before-ANY/scripts/hook.py  |  36 --
 .../2.0.6/hooks/before-ANY/scripts/params.py    | 254 -------------
 .../before-ANY/scripts/shared_initialization.py | 273 -------------
 .../2.0.6/hooks/before-INSTALL/scripts/hook.py  |  37 --
 .../hooks/before-INSTALL/scripts/params.py      | 115 ------
 .../scripts/repo_initialization.py              |  75 ----
 .../scripts/shared_initialization.py            |  37 --
 .../2.0.6/hooks/before-RESTART/scripts/hook.py  |  29 --
 .../hooks/before-START/files/checkForFormat.sh  |  65 ----
 .../before-START/files/fast-hdfs-resource.jar   | Bin 28296600 -> 0 bytes
 .../before-START/files/task-log4j.properties    | 134 -------
 .../hooks/before-START/files/topology_script.py |  66 ----
 .../before-START/scripts/custom_extensions.py   | 173 ---------
 .../2.0.6/hooks/before-START/scripts/hook.py    |  43 ---
 .../2.0.6/hooks/before-START/scripts/params.py  | 380 -------------------
 .../before-START/scripts/rack_awareness.py      |  48 ---
 .../scripts/shared_initialization.py            | 256 -------------
 .../templates/commons-logging.properties.j2     |  43 ---
 .../templates/exclude_hosts_list.j2             |  21 -
 .../templates/hadoop-metrics2.properties.j2     | 107 ------
 .../before-START/templates/health_check.j2      |  81 ----
 .../templates/include_hosts_list.j2             |  21 -
 .../templates/topology_mappings.data.j2         |  24 --
 .../services/ECS/package/scripts/ecs_client.py  |   2 +-
 .../HDP/3.0/hooks/after-INSTALL/scripts/hook.py |  37 --
 .../3.0/hooks/after-INSTALL/scripts/params.py   | 109 ------
 .../scripts/shared_initialization.py            | 140 -------
 .../hooks/before-ANY/files/changeToSecureUid.sh |  53 ---
 .../HDP/3.0/hooks/before-ANY/scripts/hook.py    |  36 --
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  | 254 -------------
 .../before-ANY/scripts/shared_initialization.py | 239 ------------
 .../3.0/hooks/before-INSTALL/scripts/hook.py    |  37 --
 .../3.0/hooks/before-INSTALL/scripts/params.py  | 115 ------
 .../scripts/repo_initialization.py              |  76 ----
 .../scripts/shared_initialization.py            |  37 --
 .../3.0/hooks/before-RESTART/scripts/hook.py    |  29 --
 .../hooks/before-START/files/checkForFormat.sh  |  65 ----
 .../before-START/files/fast-hdfs-resource.jar   | Bin 28296600 -> 0 bytes
 .../before-START/files/task-log4j.properties    | 134 -------
 .../hooks/before-START/files/topology_script.py |  66 ----
 .../HDP/3.0/hooks/before-START/scripts/hook.py  |  40 --
 .../3.0/hooks/before-START/scripts/params.py    | 364 ------------------
 .../before-START/scripts/rack_awareness.py      |  47 ---
 .../scripts/shared_initialization.py            | 249 ------------
 .../templates/commons-logging.properties.j2     |  43 ---
 .../templates/exclude_hosts_list.j2             |  21 -
 .../templates/hadoop-metrics2.properties.j2     | 107 ------
 .../before-START/templates/health_check.j2      |  81 ----
 .../templates/include_hosts_list.j2             |  21 -
 .../templates/topology_mappings.data.j2         |  24 --
 .../server/api/services/AmbariMetaInfoTest.java |  19 -
 .../src/test/python/TestResourceFilesKeeper.py  |   1 +
 .../hooks/after-INSTALL/test_after_install.py   |  26 +-
 .../2.0.6/hooks/before-ANY/test_before_any.py   |   5 +-
 .../hooks/before-INSTALL/test_before_install.py |  14 +-
 .../hooks/before-START/test_before_start.py     |  21 +-
 .../src/test/python/stacks/utils/RMFTestCase.py |   8 +
 .../src/main/assemblies/hdf-ambari-mpack.xml    |   1 +
 106 files changed, 2766 insertions(+), 5181 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-agent/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml
index 8673f2e..eede374 100644
--- a/ambari-agent/pom.xml
+++ b/ambari-agent/pom.xml
@@ -318,8 +318,7 @@
                     <include>/cred/lib/*.jar</include>
                     <include>/tools/*.jar</include>
                     <include>/cache/stacks/HDP/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</include>
-                    <include>/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</include>
-                    <include>/cache/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar</include>
+                    <include>/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar</include>
                     <include>/cache/common-services/STORM/0.9.1/package/files/wordCount.jar</include>
                   </includes>
                 </source>
@@ -487,6 +486,7 @@
                   <directory>${resourcesFolder}</directory>
                   <includes>
                     <include>common-services/**</include>
+                    <include>stack-hooks/**</include>
                     <include>stacks/stack_advisor.py</include>
                     <include>stacks/${stack.distribution}/**/*</include>
                   </includes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-agent/src/main/python/ambari_agent/FileCache.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/FileCache.py b/ambari-agent/src/main/python/ambari_agent/FileCache.py
index d0c8bdb..28912d1 100644
--- a/ambari-agent/src/main/python/ambari_agent/FileCache.py
+++ b/ambari-agent/src/main/python/ambari_agent/FileCache.py
@@ -83,11 +83,10 @@ class FileCache():
     Returns a base directory for hooks
     """
     try:
-      hooks_subpath = command['commandParams']['hooks_folder']
+      hooks_path = command['commandParams']['hooks_folder']
     except KeyError:
       return None
-    subpath = os.path.join(self.STACKS_CACHE_DIRECTORY, hooks_subpath)
-    return self.provide_directory(self.cache_dir, subpath,
+    return self.provide_directory(self.cache_dir, hooks_path,
                                   server_url_prefix)
 
 
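This FileCache change is the agent-side half of the move: hooks_folder used to be a stack-relative subpath joined under the stacks cache, and is now a cache-relative path used as-is. Side by side, assuming the usual agent cache location:

import os

cache_dir = "/var/lib/ambari-agent/cache"

old_hooks_folder = os.path.join("HDP", "2.0.6", "hooks")   # pre-AMBARI-22095
new_hooks_folder = "stack-hooks"                           # post-AMBARI-22095

old_path = os.path.join(cache_dir, "stacks", old_hooks_folder)
new_path = os.path.join(cache_dir, new_hooks_folder)
print(old_path)  # /var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks
print(new_path)  # /var/lib/ambari-agent/cache/stack-hooks
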

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestFileCache.py b/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
index 00f6b69..68cc8d9 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestFileCache.py
@@ -93,7 +93,7 @@ class TestFileCache(TestCase):
     # Check existing dir case
     command = {
       'commandParams' : {
-        'hooks_folder' : os.path.join('HDP', '2.1.1', 'hooks')
+        'hooks_folder' : 'stack-hooks'
       }
     }
     provide_directory_mock.return_value = "dummy value"
@@ -103,7 +103,7 @@ class TestFileCache(TestCase):
       pprint.pformat(provide_directory_mock.call_args_list[0][0]),
       "('/var/lib/ambari-agent/cache', "
       "{0}, "
-      "'server_url_pref')".format(pprint.pformat(os.path.join('stacks','HDP', '2.1.1', 'hooks'))))
+      "'server_url_pref')".format(pprint.pformat('stack-hooks')))
     self.assertEquals(res, "dummy value")
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 3d22044..e250da7 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -44,6 +44,7 @@
     <customActionsRoot>src/main/resources/custom_actions</customActionsRoot>
     <ambariProperties>conf/unix/ambari.properties</ambariProperties>
     <commonServicesSrcLocation>src/main/resources/common-services</commonServicesSrcLocation>
+    <stackHooksLocation>src/main/resources/stack-hooks</stackHooksLocation>
     <stacksSrcLocation>src/main/resources/stacks/${stack.distribution}</stacksSrcLocation>
     <tarballResourcesFolder>src/main/resources</tarballResourcesFolder>
     <skipPythonTests>false</skipPythonTests>
@@ -1067,6 +1068,7 @@
         <ambariProperties>target/pluggable-stack-definition/conf/unix/ambari.properties</ambariProperties>
         <resourceManagementSrcLocation>target/pluggable-stack-definition/python/resource_management</resourceManagementSrcLocation>
         <commonServicesSrcLocation>target/pluggable-stack-definition/common-services</commonServicesSrcLocation>
+        <stackHooksLocation>target/pluggable-stack-definition/stack-hooks</stackHooksLocation>
         <stacksSrcLocation>target/pluggable-stack-definition/stacks/${stack.distribution}</stacksSrcLocation>
         <resourcesSrcLocation>src/main/resources</resourcesSrcLocation>
         <tarballResourcesFolder>target/pluggable-stack-definition</tarballResourcesFolder>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/assemblies/server.xml b/ambari-server/src/main/assemblies/server.xml
index c26a769..3079d1b 100644
--- a/ambari-server/src/main/assemblies/server.xml
+++ b/ambari-server/src/main/assemblies/server.xml
@@ -167,6 +167,22 @@
     </fileSet>
     <fileSet>
       <fileMode>755</fileMode>
+      <directory>${stackHooksLocation}</directory>
+      <outputDirectory>/var/lib/ambari-server/resources/stack-hooks</outputDirectory>
+      <excludes>
+	    <exclude>/before-START/files/fast-hdfs-resource.jar</exclude>
+	  </excludes>
+    </fileSet>
+    <fileSet>
+      <fileMode>644</fileMode>
+      <directory>${stackHooksLocation}</directory>
+      <outputDirectory>/var/lib/ambari-server/resources/stack-hooks</outputDirectory>
+      <includes>
+	    <include>/before-START/files/fast-hdfs-resource.jar</include>
+	  </includes>
+    </fileSet>
+    <fileSet>
+      <fileMode>755</fileMode>
       <directory>src/main/resources/upgrade/catalog</directory>
       <outputDirectory>/var/lib/ambari-server/resources/upgrade/catalog</outputDirectory>
     </fileSet>
@@ -176,8 +192,6 @@
       <outputDirectory>/var/lib/ambari-server/resources/stacks/${stack.distribution}</outputDirectory>
       <excludes>
 	    <exclude>/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</exclude>
-        <exclude>/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</exclude>
-        <exclude>/3.0/hooks/before-START/files/fast-hdfs-resource.jar</exclude>
 	  </excludes>
     </fileSet>
     <fileSet>
@@ -186,8 +200,6 @@
       <outputDirectory>/var/lib/ambari-server/resources/stacks/${stack.distribution}</outputDirectory>
       <includes>
 	    <include>/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</include>
-        <include>/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</include>
-        <include>/3.0/hooks/before-START/files/fast-hdfs-resource.jar</include>
 	  </includes>
     </fileSet>
     <fileSet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
index 47aa093..ff13d0b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
@@ -20,6 +20,7 @@ package org.apache.ambari.server.actionmanager;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
+import static org.apache.ambari.server.stack.StackManager.DEFAULT_HOOKS_FOLDER;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -268,7 +269,7 @@ public class ExecutionCommandWrapper {
           stackId.getStackVersion());
 
         if (!commandParams.containsKey(HOOKS_FOLDER)) {
-          commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+          commandParams.put(HOOKS_FOLDER, DEFAULT_HOOKS_FOLDER);
         }
 
         if (!commandParams.containsKey(SERVICE_PACKAGE_FOLDER)) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
index d83a5d1..c13df6b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
@@ -25,6 +25,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TY
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
+import static org.apache.ambari.server.stack.StackManager.DEFAULT_HOOKS_FOLDER;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -340,7 +341,7 @@ public class HeartbeatMonitor implements Runnable {
     commandParams.put(COMMAND_TIMEOUT, commandTimeout);
     commandParams.put(SERVICE_PACKAGE_FOLDER,
        serviceInfo.getServicePackageFolder());
-    commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+    commandParams.put(HOOKS_FOLDER, DEFAULT_HOOKS_FOLDER);
     // Fill host level params
     Map<String, String> hostLevelParams = statusCmd.getHostLevelParams();
     hostLevelParams.put(JDK_LOCATION, ambariManagementController.getJdkResourceUrl());

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 52f66bc..33ce25e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -19,6 +19,7 @@ package org.apache.ambari.server.controller.internal;
 
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
+import static org.apache.ambari.server.stack.StackManager.DEFAULT_HOOKS_FOLDER;
 
 import java.text.MessageFormat;
 import java.util.ArrayList;
@@ -907,7 +908,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
           effectiveStackId.getStackVersion(), serviceName);
 
       commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
-      commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+      commandParams.put(HOOKS_FOLDER, DEFAULT_HOOKS_FOLDER);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index 0e59c95..9259466 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -72,10 +72,6 @@ public class StackDirectory extends StackDefinitionDirectory {
    * Filename for theme file at service layer
    */
   public static final String SERVICE_THEME_FILE_NAME = "theme.json";
-  /**
-   * hooks directory path
-   */
-  private String hooksDir;
 
   /**
    * upgrades directory path
@@ -142,15 +138,10 @@ public class StackDirectory extends StackDefinitionDirectory {
    */
   ModuleFileUnmarshaller unmarshaller = new ModuleFileUnmarshaller();
 
-  /**
-   * name of the hooks directory
-   */
-  public static final String HOOKS_FOLDER_NAME = "hooks";
   public static final FilenameFilter FILENAME_FILTER = new FilenameFilter() {
     @Override
     public boolean accept(File dir, String s) {
-      return !(s.equals(".svn") || s.equals(".git") ||
-          s.equals(HOOKS_FOLDER_NAME));
+      return !(s.equals(".svn") || s.equals(".git"));
     }
   };
 
@@ -206,15 +197,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
-   * Obtain the hooks directory path.
-   *
-   * @return hooks directory path
-   */
-  public String getHooksDir() {
-    return hooksDir;
-  }
-
-  /**
    * Obtain the upgrades directory path.
    *
    * @return upgrades directory path
@@ -327,20 +309,11 @@ public class StackDirectory extends StackDefinitionDirectory {
    */
   private void parsePath() throws AmbariException {
     Collection<String> subDirs = Arrays.asList(directory.list());
-    if (subDirs.contains(HOOKS_FOLDER_NAME)) {
-      // hooksDir is expected to be relative to stack root
-      hooksDir = getStackDirName() + File.separator + getName() +
-          File.separator + HOOKS_FOLDER_NAME;
-    } else {
-      LOG.debug("Hooks folder {}{}" + HOOKS_FOLDER_NAME + " does not exist", getAbsolutePath(), File.separator);
-    }
-
     if (subDirs.contains(RCO_FILE_NAME)) {
       // rcoFile is expected to be absolute
       rcoFilePath = getAbsolutePath() + File.separator + RCO_FILE_NAME;
     }
 
-
     if (subDirs.contains(KERBEROS_DESCRIPTOR_FILE_NAME)) {
       // kerberosDescriptorFilePath is expected to be absolute
       kerberosDescriptorFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_FILE_NAME;

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
index b11ecab..eb6737a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
@@ -79,6 +79,11 @@ public class StackManager {
   public static final String COMMON_SERVICES = "common-services";
 
   /**
+   * Prefix used for common stack hooks parent path string
+   */
+  public static final String DEFAULT_HOOKS_FOLDER = "stack-hooks";
+
+  /**
    * Prefix used for extension services parent path string
    */
   public static final String EXTENSIONS = "extensions";

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 6dc2b93..742706d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -284,10 +284,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     mergeConfigurations(parentStack, allStacks, commonServices, extensions);
     mergeRoleCommandOrder(parentStack);
 
-    if (stackInfo.getStackHooksFolder() == null) {
-      stackInfo.setStackHooksFolder(parentStack.getModuleInfo().getStackHooksFolder());
-    }
-
     // grab stack level kerberos.json from parent stack
     if (stackInfo.getKerberosDescriptorFileLocation() == null) {
       stackInfo.setKerberosDescriptorFileLocation(parentStack.getModuleInfo().getKerberosDescriptorFileLocation());
@@ -574,7 +570,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setMinUpgradeVersion(smx.getVersion().getUpgrade());
       stackInfo.setActive(smx.getVersion().isActive());
       stackInfo.setParentStackVersion(smx.getExtends());
-      stackInfo.setStackHooksFolder(stackDirectory.getHooksDir());
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index a3886ab..dcf850f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -437,14 +437,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
   }
 
-  public String getStackHooksFolder() {
-    return stackHooksFolder;
-  }
-
-  public void setStackHooksFolder(String stackHooksFolder) {
-    this.stackHooksFolder = stackHooksFolder;
-  }
-
   /**
    * Set the path of the stack upgrade directory.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py b/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
index b41c400..06d16fa 100644
--- a/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
+++ b/ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
@@ -33,7 +33,7 @@ class ResourceFilesKeeper():
   This class encapsulates all utility methods for resource files maintenance.
   """
 
-  HOOKS_DIR="hooks"
+  STACK_HOOKS_DIR="stack-hooks"
   PACKAGE_DIR="package"
   STACKS_DIR="stacks"
   COMMON_SERVICES_DIR="common-services"
@@ -43,7 +43,7 @@ class ResourceFilesKeeper():
   EXTENSIONS_DIR="extensions"
 
   # For these directories archives are created
-  ARCHIVABLE_DIRS = [HOOKS_DIR, PACKAGE_DIR]
+  ARCHIVABLE_DIRS = [PACKAGE_DIR]
 
   HASH_SUM_FILE=".hash"
   ARCHIVE_NAME="archive.zip"
@@ -116,6 +116,9 @@ class ResourceFilesKeeper():
     # Iterate over extension directories
     self._iter_update_directory_archive(valid_extensions)
 
+    # stack hooks
+    self._update_resources_subdir_archive(self.STACK_HOOKS_DIR)
+
     # custom actions
     self._update_resources_subdir_archive(self.CUSTOM_ACTIONS_DIR)
 

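The keeper's archive refresh is hash-gated: a directory's archive.zip is rebuilt only when the content hash no longer matches the recorded .hash file. The stdlib-only sketch below illustrates the idea; the helper names and hashing details are ours, not the keeper's actual implementation:

import hashlib
import os
import zipfile

HASH_SUM_FILE = ".hash"
ARCHIVE_NAME = "archive.zip"

def directory_hash(path):
    # Stable digest over file contents, skipping the bookkeeping files.
    digest = hashlib.md5()
    for root, _, files in sorted(os.walk(path)):
        for name in sorted(files):
            if name in (HASH_SUM_FILE, ARCHIVE_NAME):
                continue
            with open(os.path.join(root, name), "rb") as fp:
                digest.update(fp.read())
    return digest.hexdigest()

def update_directory_archive(path):
    current = directory_hash(path)
    hash_file = os.path.join(path, HASH_SUM_FILE)
    recorded = open(hash_file).read().strip() if os.path.exists(hash_file) else None
    if recorded == current:
        return  # unchanged: keep the existing archive
    with zipfile.ZipFile(os.path.join(path, ARCHIVE_NAME), "w") as zf:
        for root, _, files in os.walk(path):
            for name in files:
                if name not in (HASH_SUM_FILE, ARCHIVE_NAME):
                    full = os.path.join(root, name)
                    zf.write(full, os.path.relpath(full, path))
    with open(hash_file, "w") as fp:
        fp.write(current)
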
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/python/ambari_server/serverConfiguration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/serverConfiguration.py b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
index bd64b0e..df89f79 100644
--- a/ambari-server/src/main/python/ambari_server/serverConfiguration.py
+++ b/ambari-server/src/main/python/ambari_server/serverConfiguration.py
@@ -562,7 +562,7 @@ class ServerConfigDefaultsLinux(ServerConfigDefaults):
       (AmbariPath.get("/var/lib/ambari-server/data/cache/"), "700", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/common-services/STORM/0.9.1/package/files/wordCount.jar"), "644", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/files/wordCount.jar"), "644", "{0}", False),
-      (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar"), "644", "{0}", False),
+      (AmbariPath.get("/var/lib/ambari-server/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar"), "644", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.1/services/SMARTSENSE/package/files/view/smartsense-ambari-view-1.4.0.0.60.jar"), "644", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar"), "644", "{0}", False),
       # Also, /etc/ambari-server/conf/password.dat

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/python/ambari_server/setupMpacks.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/setupMpacks.py b/ambari-server/src/main/python/ambari_server/setupMpacks.py
index 6f232f4..7a9b2b8 100755
--- a/ambari-server/src/main/python/ambari_server/setupMpacks.py
+++ b/ambari-server/src/main/python/ambari_server/setupMpacks.py
@@ -818,7 +818,7 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False):
 
   print_info_msg("Management pack {0}-{1} successfully installed! Please restart ambari-server.".format(mpack_name, mpack_version))
   return mpack_metadata, mpack_name, mpack_version, mpack_staging_dir, mpack_archive_path
-
+# TODO
 def _execute_hook(mpack_metadata, hook_name, base_dir):
   if "hooks" in mpack_metadata:
     hooks = mpack_metadata["hooks"]

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/scripts/Ambaripreupload.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/Ambaripreupload.py b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
index 132a5b9..a8665b1 100644
--- a/ambari-server/src/main/resources/scripts/Ambaripreupload.py
+++ b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
@@ -439,7 +439,7 @@ with Environment() as env:
   # jar shouldn't be used before (read comment below)
   File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
        mode=0644,
-       content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar")
+       content=StaticFile("/var/lib/ambari-agent/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar")
   )
   # Create everything in one jar call (this is fast).
   # (! Before everything should be executed with action="create_on_execute/delete_on_execute" for this time-optimization to work)
@@ -460,4 +460,4 @@ with Environment() as env:
       sudo = True
     )
 
-  print "Ambari preupload script completed."
\ No newline at end of file
+  print "Ambari preupload script completed."

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh b/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
index d85741b..5318fc5 100755
--- a/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
+++ b/ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
@@ -135,7 +135,7 @@ check_tools
 prepare_input
 
 # the default implementation creates user home folders; the first argument must be the username
-ambari_sudo "yarn jar /var/lib/ambari-server/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar $JSON_INPUT"
+ambari_sudo "yarn jar /var/lib/ambari-server/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar $JSON_INPUT"
 
 if [ "$DEBUG" -gt "0" ]; then echo "Switch debug OFF";set -x;unset DEBUG; else echo "debug: OFF"; fi
 unset DEBUG

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..8bae9e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.hook import Hook
+from shared_initialization import link_configs
+from shared_initialization import setup_config
+from shared_initialization import setup_stack_symlinks
+
+class AfterInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_stack_symlinks(self.stroutfile)
+    setup_config()
+
+    link_configs(self.stroutfile)
+
+if __name__ == "__main__":
+  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
new file mode 100644
index 0000000..bf9d79b
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
@@ -0,0 +1,108 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.script import Script
+from resource_management.libraries.script.script import get_config_lock_file
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_stack_version, get_major_version
+from string import lower
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+major_stack_version = get_major_version(stack_version_formatted)
+
+# service name
+service_name = config['serviceName']
+
+# logsearch configuration
+logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf"
+
+agent_cache_dir = config['hostLevelParams']['agentCacheDir']
+service_package_folder = config['commandParams']['service_package_folder']
+logsearch_service_name = service_name.lower().replace("_", "-")
+logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
+logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
+logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
+
+# default hadoop params
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+
+versioned_stack_root = '/usr/hdp/current'
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#users and groups
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+has_namenode = not len(namenode_host) == 0
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+
+link_configs_lock_file = get_config_lock_file()
+stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
+
+upgrade_suspended = default("/roleParams/upgrade_suspended", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..67c3ba8
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,132 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import os
+
+import ambari_simplejson as json
+from ambari_jinja2 import Environment as JinjaEnvironment
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Directory, File
+from resource_management.core.source import InlineTemplate, Template
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
+from resource_management.libraries.resources.xml_config import XmlConfig
+from resource_management.libraries.script import Script
+
+
+def setup_stack_symlinks(struct_out_file):
+  """
+  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
+  stack version, such as "2.3". This should always be called after a component has been
+  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
+  interact with this since it's done via a custom command and will not trigger this hook.
+  :return:
+  """
+  import params
+  if params.upgrade_suspended:
+    Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
+    return
+
+  if params.host_sys_prepped:
+    Logger.warning("Skipping running stack-selector-tool because this is a sys_prepped host. This may cause symlink pointers not to be created for HDP components installed later on top of an already sys_prepped host")
+    return
+
+  # get the packages which the stack-select tool should be used on
+  stack_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
+  if stack_packages is None:
+    return
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
+    return
+
+  # On parallel command execution this should be executed by a single process at a time.
+  with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+    for package in stack_packages:
+      stack_select.select(package, json_version)
+
+
+def setup_config():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+
+  is_hadoop_conf_dir_present = False
+  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
+    is_hadoop_conf_dir_present = True
+  else:
+    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
+
+  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
+    # create core-site only if the hadoop config directory exists
+    XmlConfig("core-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.hdfs_user,
+              group=params.user_group,
+              only_if=format("ls {hadoop_conf_dir}"))
+
+  Directory(params.logsearch_logfeeder_conf,
+            mode=0755,
+            cd_access='a',
+            create_parents=True
+            )
+
+  if params.logsearch_config_file_exists:
+    File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
+         content=Template(params.logsearch_config_file_path, extra_imports=[default])
+         )
+  else:
+    Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
+
+
+def load_version(struct_out_file):
+  """
+  Loads the version from the structured-out file. Kept as a separate method for easier testing.
+  """
+  try:
+    with open(struct_out_file, 'r') as fp:
+      json_info = json.load(fp)
+
+    return json_info['version']
+  except (IOError, KeyError, TypeError):
+    return None
+
+
+def link_configs(struct_out_file):
+  """
+  Links configs; runs only on a fresh install of HDP 2.3 or higher.
+  """
+  import params
+
+  json_version = load_version(struct_out_file)
+
+  if not json_version:
+    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
+    return
+
+  # On parallel command execution this should be executed by a single process at a time.
+  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
+    for k, v in conf_select.get_package_dirs().iteritems():
+      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
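
For reference, setup_stack_symlinks() and link_configs() above serialize on a
lock file because Ambari may execute commands on the same host in parallel. A
minimal, stand-alone sketch of the same fcntl pattern (this is not the
FcntlBasedProcessLock implementation, and the lock-file path is illustrative):

import fcntl

# Hold an exclusive flock around the critical section; concurrent
# processes taking the same lock block until it is released.
with open('/tmp/stack_select_lock', 'w') as lock_file:
  fcntl.flock(lock_file, fcntl.LOCK_EX)
  try:
    pass  # critical section, e.g. stack_select.select(package, json_version)
  finally:
    fcntl.flock(lock_file, fcntl.LOCK_UN)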

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stack-hooks/before-ANY/files/changeToSecureUid.sh
new file mode 100644
index 0000000..a6b8b77
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/files/changeToSecureUid.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+newUid=$3
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   cut -d: -f3 /etc/passwd | grep -qx "$i"
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+if [ -z "$2" ]; then
+  # one-argument form: print the user's existing UID, or a free one, and exit
+  if id -u ${username} >/dev/null 2>&1; then
+   newUid=$(id -u ${username})
+  else
+   find_available_uid
+  fi
+  echo $newUid
+  exit 0
+else
+  find_available_uid
+fi
+
+if [ "$newUid" -eq 0 ]
+then
+  echo "Failed to find a free UID between 1001 and 2000"
+  exit 1
+fi
+
+set -e
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permissions for ${dir_array[@]}"
+$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
+exit 0
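
The script above has two call shapes: with a single username argument it
prints the user's existing UID (or the first free UID in 1001..2000) and
exits; with a username, a comma-separated directory list, and a seed UID it
actually migrates the account and directory ownership. A hedged sketch of the
query-only call from Python (the staged script path is an assumption; set_uid()
in before-ANY stages it under the agent tmp dir):

import subprocess

# One-argument form: only queries a UID, changes nothing.
script = '/var/lib/ambari-agent/tmp/changeUid.sh'  # illustrative path
out = subprocess.check_output([script, 'ambari-qa'])
print(int(out.strip()))  # existing UID, or first free UID in 1001..2000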

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py
new file mode 100644
index 0000000..c34be0b
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from shared_initialization import *
+
+class BeforeAnyHook(Hook):
+
+  def hook(self, env):
+    import params
+    env.set_params(params)
+
+    setup_users()
+    if params.has_namenode or params.dfs_type == 'HCFS':
+      setup_hadoop_env()
+    setup_java()
+
+if __name__ == "__main__":
+  BeforeAnyHook().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
new file mode 100644
index 0000000..20992e2
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
@@ -0,0 +1,254 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import collections
+import re
+import os
+import ast
+
+import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and exposes the same set of functions.
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.is_empty import is_empty
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.expect import expect
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.stack_features import get_stack_feature_version
+from resource_management.libraries.functions.get_architecture import get_architecture
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_root = Script.get_stack_root()
+
+architecture = get_architecture()
+
+dfs_type = default("/commandParams/dfs_type", "")
+
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None)
+java_home = config['hostLevelParams']['java_home']
+java_version = expect("/hostLevelParams/java_version", int)
+jdk_location = config['hostLevelParams']['jdk_location']
+
+hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
+
+sudo = AMBARI_SUDO_BINARY
+
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
+version = default("/commandParams/version", None)
+# Handle upgrade and downgrade
+if (upgrade_type is not None) and version:
+  stack_version_formatted = format_stack_version(version)
+ambari_java_home = default("/commandParams/ambari_java_home", None)
+ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+
+# Some datanode settings
+dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
+dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
+dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
+dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
+secure_dn_ports_are_in_use = False
+
+def get_port(address):
+  """
+  Extracts the port from an address like 0.0.0.0:1019
+  """
+  if address is None:
+    return None
+  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
+  if m is not None:
+    return int(m.group(2))
+  else:
+    return None
+
+def is_secure_port(port):
+  """
+  Returns True if the port is root-owned (below 1024) on *nix systems
+  """
+  if port is not None:
+    return port < 1024
+  else:
+    return False
+
+# upgrades would cause these directories to have a version instead of "current"
+# which would cause a lot of problems when writing out hadoop-env.sh; instead
+# force the use of "current" in the hook
+hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
+hadoop_home = stack_select.get_hadoop_dir("home")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+
+hadoop_dir = "/etc/hadoop"
+hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
+datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
+is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
+
+mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+
+if not security_enabled:
+  hadoop_secure_dn_user = '""'
+else:
+  dfs_dn_port = get_port(dfs_dn_addr)
+  dfs_dn_http_port = get_port(dfs_dn_http_addr)
+  dfs_dn_https_port = get_port(dfs_dn_https_addr)
+  # Avoid cases where the datanode cannot be started as a plain user because root-owned ports are in use
+  if dfs_http_policy == "HTTPS_ONLY":
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
+  elif dfs_http_policy == "HTTP_AND_HTTPS":
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
+  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
+    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
+  if secure_dn_ports_are_in_use:
+    hadoop_secure_dn_user = hdfs_user
+  else:
+    hadoop_secure_dn_user = '""'
+
+#hadoop params
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
+
+jsvc_path = "/usr/lib/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
+namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
+
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+oozie_user = config['configurations']['oozie-env']["oozie_user"]
+falcon_user = config['configurations']['falcon-env']["falcon_user"]
+ranger_user = config['configurations']['ranger-env']["ranger_user"]
+zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
+zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
+ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
+zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
+
+# get the correct version to use for checking stack features
+version_for_stack_feature_checks = get_stack_feature_version(config)
+
+
+has_namenode = len(namenode_host) > 0
+has_ganglia_server = len(ganglia_server_hosts) > 0
+has_tez = 'tez-site' in config['configurations']
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_oozie_server = len(oozie_servers) > 0
+has_falcon_server_hosts = len(falcon_server_hosts) > 0
+has_ranger_admin = len(ranger_admin_hosts) > 0
+has_zeppelin_master = len(zeppelin_master_hosts) > 0
+stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
+if dfs_ha_nameservices is None:
+  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+if has_namenode or dfs_type == 'HCFS':
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+  hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+ranger_group = config['configurations']['ranger-env']['ranger_group']
+dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
+
+sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+user_to_groups_dict = {}
+
+# Append new user-group mappings to the dict
+try:
+  user_group_map = ast.literal_eval(config['hostLevelParams']['user_groups'])
+  for key in user_group_map.iterkeys():
+    user_to_groups_dict[key] = user_group_map[key]
+except ValueError:
+  print('User-group mapping (user_groups) is missing in the hostLevelParams')
+
+user_to_gid_dict = collections.defaultdict(lambda: user_group)
+
+user_list = json.loads(config['hostLevelParams']['user_list'])
+group_list = json.loads(config['hostLevelParams']['group_list'])
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
+override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
+
+# if NN HA is enabled on a secure cluster, access ZooKeeper securely
+if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
+  hadoop_zkfc_opts = format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client")
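
get_port() and is_secure_port() above drive the hadoop_secure_dn_user choice:
a datanode bound to a root-owned port (below 1024) must be launched through
the secure starter. A self-contained restatement with illustrative addresses:

import re

def get_port(address):
  # same regex as above: optional scheme, host, then a 1-5 digit port
  if address is None:
    return None
  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
  return int(m.group(2)) if m else None

def is_secure_port(port):
  return port is not None and port < 1024

assert get_port('0.0.0.0:1019') == 1019
assert get_port('https://dn.example.com:50475') == 50475
assert is_secure_port(get_port('0.0.0.0:1019'))       # root-owned port
assert not is_secure_port(get_port('0.0.0.0:50010'))  # plain-user port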

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
new file mode 100644
index 0000000..27679e0
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
@@ -0,0 +1,273 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import getpass
+import tempfile
+from copy import copy
+from resource_management.libraries.functions.version import compare_versions
+from resource_management import *
+from resource_management.core import shell
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  should_create_users_and_groups = False
+  if params.host_sys_prepped:
+    should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
+  else:
+    should_create_users_and_groups = not params.ignore_groupsusers_create
+
+  if should_create_users_and_groups:
+    for group in params.group_list:
+      Group(group,
+      )
+
+    for user in params.user_list:
+      User(user,
+           uid = get_uid(user) if params.override_uid == "true" else None,
+           gid = params.user_to_gid_dict[user],
+           groups = params.user_to_groups_dict[user],
+           fetch_nonlocal_groups = params.fetch_nonlocal_groups,
+           )
+
+    if params.override_uid == "true":
+      set_uid(params.smoke_user, params.smoke_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for smoke user as override_uid is not enabled')
+  else:
+    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
+    pass
+
+
+  if params.has_hbase_masters:
+    Directory (params.hbase_tmp_dir,
+               owner = params.hbase_user,
+               mode=0775,
+               create_parents = True,
+               cd_access="a",
+    )
+
+    if params.override_uid == "true":
+      set_uid(params.hbase_user, params.hbase_user_dirs)
+    else:
+      Logger.info('Skipping setting uid for hbase user as override_uid is not enabled')
+
+  if should_create_users_and_groups:
+    if params.has_namenode:
+      create_dfs_cluster_admins()
+    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
+      create_tez_am_view_acls()
+  else:
+    Logger.info('Skipping setting dfs cluster admin and tez view acls as user and group creation is disabled (sys prepped host or ignore_groupsusers_create flag)')
+
+def create_dfs_cluster_admins():
+  """
+  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
+
+  User(params.hdfs_user,
+    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
+    fetch_nonlocal_groups = params.fetch_nonlocal_groups
+  )
+
+def create_tez_am_view_acls():
+
+  """
+  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
+  """
+  import params
+
+  if not params.tez_am_view_acls.startswith("*"):
+    create_users_and_groups(params.tez_am_view_acls)
+
+def create_users_and_groups(user_and_groups):
+
+  import params
+
+  parts = re.split(r'\s+', user_and_groups)
+  if len(parts) == 1:
+    parts.append("")
+
+  users_list = parts[0].strip(",").split(",") if parts[0] else []
+  groups_list = parts[1].strip(",").split(",") if parts[1] else []
+
+  # skip creating groups and users if * is provided as value.
+  users_list = filter(lambda x: x != '*' , users_list)
+  groups_list = filter(lambda x: x != '*' , groups_list)
+
+  if users_list:
+    User(users_list,
+          fetch_nonlocal_groups = params.fetch_nonlocal_groups
+    )
+
+  if groups_list:
+    Group(copy(groups_list),
+    )
+  return groups_list
+
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  import params
+
+  File(format("{tmp_dir}/changeUid.sh"),
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
+  uid = get_uid(user, return_existing=True)
+  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {new_uid}", new_uid=0 if uid is None else uid),
+          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
+
+def get_uid(user, return_existing=False):
+  """
+  Tries to get the UID for a username. It first looks for a "<user>_uid" property in the *-env configurations and, if *return_existing=True*,
+  falls back to the UID of the existing *user*.
+
+  :param user: username to get UID for
+  :param return_existing: return UID for existing user
+  :return:
+  """
+  import params
+  user_str = str(user) + "_uid"
+  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
+
+  if service_env and params.config['configurations'][service_env[0]][user_str]:
+    service_env_str = str(service_env[0])
+    uid = params.config['configurations'][service_env_str][user_str]
+    if len(service_env) > 1:
+      Logger.warning("Multiple values found for %s, using %s" % (user_str, uid))
+    return uid
+  else:
+    if return_existing:
+      # pick up existing UID or try to find available UID in /etc/passwd, see changeToSecureUid.sh for more info
+      if user == params.smoke_user:
+        return None
+      File(format("{tmp_dir}/changeUid.sh"),
+           content=StaticFile("changeToSecureUid.sh"),
+           mode=0555)
+      code, newUid = shell.call(format("{tmp_dir}/changeUid.sh {user}"))
+      return int(newUid)
+    else:
+      # do not return the UID of an existing user; used in the User resource call to let the OS choose a UID for us
+      return None
+
+def setup_hadoop_env():
+  import params
+  stackversion = params.stack_version_unformatted
+  Logger.info("FS Type: {0}".format(params.dfs_type))
+  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
+    if params.security_enabled:
+      tc_owner = "root"
+    else:
+      tc_owner = params.hdfs_user
+
+    # create /etc/hadoop
+    Directory(params.hadoop_dir, mode=0755)
+
+    # write out hadoop-env.sh, but only if the directory exists
+    if os.path.exists(params.hadoop_conf_dir):
+      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
+        group=params.user_group,
+        content=InlineTemplate(params.hadoop_env_sh_template))
+
+    # Create tmp dir for java.io.tmpdir
+    # Handle the case where /tmp is mounted noexec
+    Directory(params.hadoop_java_io_tmpdir,
+              owner=params.hdfs_user,
+              group=params.user_group,
+              mode=01777
+    )
+
+def setup_java():
+  """
+  Installs the JDK using specific params.
+  Also installs the Ambari JDK if the stack JDK and Ambari JDK differ.
+  """
+  import params
+  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
+  if params.ambari_java_home and params.ambari_java_home != params.java_home:
+    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
+
+def __setup_java(custom_java_home, custom_jdk_name):
+  """
+  Installs the JDK using specific params that come from ambari-server
+  """
+  import params
+  java_exec = format("{custom_java_home}/bin/java")
+
+  if not os.path.isfile(java_exec):
+    if not custom_jdk_name: # no jdk name means a user-managed (custom) JDK is expected
+      raise Fail(format("Unable to access {java_exec}. Confirm you have copied the JDK to this host."))
+
+    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
+    java_dir = os.path.dirname(custom_java_home)
+
+    Directory(params.artifact_dir,
+              create_parents = True,
+              )
+
+    File(jdk_curl_target,
+         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
+         not_if = format("test -f {jdk_curl_target}")
+         )
+
+    File(jdk_curl_target,
+         mode = 0755,
+         )
+
+    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
+
+    try:
+      if custom_jdk_name.endswith(".bin"):
+        chmod_cmd = ("chmod", "+x", jdk_curl_target)
+        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+      elif custom_jdk_name.endswith(".gz"):
+        chmod_cmd = ("chmod","a+x", java_dir)
+        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
+
+      Directory(java_dir
+                )
+
+      Execute(chmod_cmd,
+              sudo = True,
+              )
+
+      Execute(install_cmd,
+              )
+
+    finally:
+      Directory(tmp_java_dir, action="delete")
+
+    File(format("{custom_java_home}/bin/java"),
+         mode=0755,
+         cd_access="a",
+         )
+    Execute(('chmod', '-R', '755', custom_java_home),
+            sudo = True,
+            )
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..ce17776
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+from repo_initialization import *
+
+class BeforeInstallHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    self.run_custom_hook('before-ANY')
+    env.set_params(params)
+    
+    install_repos()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
new file mode 100644
index 0000000..50c5a40
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,115 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management.core.system import System
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import default, format
+from resource_management.libraries.functions.expect import expect
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+sudo = AMBARI_SUDO_BINARY
+
+stack_version_unformatted = config['hostLevelParams']['stack_version']
+agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
+agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+
+#users and groups
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+smoke_user =  config['configurations']['cluster-env']['smokeuser']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+tez_user = config['configurations']['tez-env']["tez_user"]
+
+user_group = config['configurations']['cluster-env']['user_group']
+proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
+
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
+# repo templates
+repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
+repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
+
+#hosts
+hostname = config["hostname"]
+ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
+
+has_sqoop_client = 'sqoop-env' in config['configurations']
+has_namenode = len(namenode_host) > 0
+has_hs = len(hs_host) > 0
+has_resourcemanager = len(rm_host) > 0
+has_slaves = len(slave_hosts) > 0
+has_oozie_server = len(oozie_servers) > 0
+has_hcat_server_host = len(hcat_server_hosts) > 0
+has_hive_server_host = len(hive_server_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_zk_host = len(zk_hosts) > 0
+has_ganglia_server = len(ganglia_server_hosts) > 0
+has_storm_server = len(storm_server_hosts) > 0
+has_falcon_server = len(falcon_host) > 0
+has_tez = 'tez-site' in config['configurations']
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = "/tmp/hbase-hbase"
+
+#security params
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+#java params
+java_home = config['hostLevelParams']['java_home']
+artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
+host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+
+smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+if has_hbase_masters:
+  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+#repo params
+repo_info = config['hostLevelParams']['repo_info']
+service_repo_info = default("/hostLevelParams/service_repo_info",None)
+
+repo_file = default("/repositoryFile", None)
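
Both params.py files lean on default('/path/like/this', fallback) to read the
command JSON without raising on missing keys. A hedged approximation of that
lookup (the real helper lives in
resource_management.libraries.functions.default; this sketch only mirrors its
observable behavior):

def default_lookup(config, path, fallback):
  # walk a '/'-separated path into nested dicts, falling back on any miss
  node = config
  for key in path.strip('/').split('/'):
    if not isinstance(node, dict) or key not in node:
      return fallback
    node = node[key]
  return node

cfg = {'clusterHostInfo': {'namenode_host': ['nn1.example.com']}}
print(default_lookup(cfg, '/clusterHostInfo/namenode_host', []))  # ['nn1.example.com']
print(default_lookup(cfg, '/clusterHostInfo/rm_host', []))        # []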

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py
new file mode 100644
index 0000000..9f2b344
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py
@@ -0,0 +1,75 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons.os_check import OSCheck
+from resource_management.libraries.resources.repository import Repository
+from resource_management.libraries.functions.repository_util import create_repo_files, CommandRepository, UBUNTU_REPO_COMPONENTS_POSTFIX
+from resource_management.core.logger import Logger
+import ambari_simplejson as json
+
+
+def _alter_repo(action, repo_string, repo_template):
+  """
+  @param action: "delete" or "create"
+  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+  """
+  repo_dicts = json.loads(repo_string)
+
+  if not isinstance(repo_dicts, list):
+    repo_dicts = [repo_dicts]
+
+  if not repo_dicts:
+    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
+  else:
+    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
+
+  for repo in repo_dicts:
+    if 'baseUrl' not in repo:
+      repo['baseUrl'] = None
+    if 'mirrorsList' not in repo:
+      repo['mirrorsList'] = None
+
+    ubuntu_components = [ repo['distribution'] if 'distribution' in repo and repo['distribution'] else repo['repoName'] ] \
+                        + [repo['components'].replace(",", " ") if 'components' in repo and repo['components'] else UBUNTU_REPO_COMPONENTS_POSTFIX]
+
+    Repository(repo['repoId'],
+               action = action,
+               base_url = repo['baseUrl'],
+               mirror_list = repo['mirrorsList'],
+               repo_file_name = repo['repoName'],
+               repo_template = repo_template,
+               components = ubuntu_components) # ubuntu specific
+
+
+def install_repos():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
+
+  # use this newer way of specifying repositories, if available
+  if params.repo_file is not None:
+    create_repo_files(template, CommandRepository(params.repo_file))
+    return
+
+  _alter_repo("create", params.repo_info, template)
+
+  if params.service_repo_info:
+    _alter_repo("create", params.service_repo_info, template)
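
The repo_info string handled by _alter_repo() is a JSON list of repository
dicts, as its docstring shows. Decoding the docstring's own sample:

import json  # ambari_simplejson is API-compatible for this use

repo_string = ('[{"baseUrl":"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0",'
               '"osType":"centos6","repoId":"HDP-2.0._","repoName":"HDP"}]')
for repo in json.loads(repo_string):
  # _alter_repo() backfills absent baseUrl/mirrorsList keys with None
  print(repo['repoId'], repo.get('baseUrl'), repo.get('mirrorsList'))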

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..1609050
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management.libraries.functions import stack_tools
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.core.resources.packaging import Package
+
+def install_packages():
+  import params
+  if params.host_sys_prepped:
+    return
+
+  packages = ['unzip', 'curl']
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
+    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
+    packages.append(stack_selector_package)
+  Package(packages,
+          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+          retry_count=params.agent_stack_retry_count)
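
install_packages() gates the stack-selector package on
compare_versions(stack_version_formatted, '2.2') >= 0. A minimal stand-in for
that cmp-style comparison (the real helper also normalizes build suffixes;
this sketch assumes plain dotted integers):

def compare_versions(a, b):
  # negative, zero, or positive, like cmp()
  pa = [int(x) for x in a.split('.')]
  pb = [int(x) for x in b.split('.')]
  return (pa > pb) - (pa < pb)

assert compare_versions('2.3', '2.2') >= 0   # selector package gets installed
assert compare_versions('2.1.0', '2.2') < 0  # skipped on older stacks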

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py
new file mode 100644
index 0000000..14b9d99
--- /dev/null
+++ b/ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class BeforeRestartHook(Hook):
+
+  def hook(self, env):
+    self.run_custom_hook('before-START')
+
+if __name__ == "__main__":
+  BeforeRestartHook().execute()
+


[13/50] [abbrv] ambari git commit: AMBARI-22120. Current Stack Showing 'Upgrade' (alexantonenko)

Posted by jl...@apache.org.
AMBARI-22120. Current Stack Showing 'Upgrade' (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/11707ba5
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/11707ba5
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/11707ba5

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 11707ba5d40c5cab170c14cfbc5ef955ff89656a
Parents: 1032bc5
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Tue Oct 3 18:08:30 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Tue Oct 3 18:08:30 2017 +0300

----------------------------------------------------------------------
 .../stack_upgrade/upgrade_version_box_view.js   |  7 +-----
 .../upgrade_version_box_view_test.js            | 24 ++++----------------
 2 files changed, 5 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/11707ba5/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
index 212ef3d..81049e9 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
@@ -265,14 +265,9 @@ App.UpgradeVersionBoxView = Em.View.extend({
     if (Em.get(currentVersion, 'stack_name') !== this.get('content.stackVersionType') || isVersionHigherThanCurrent) {
       switch (status){
         case 'OUT_OF_SYNC':
-          element.set('isButtonGroup', true);
+          element.set('isButton', true);
           element.set('text', this.get('isVersionColumnView') ? Em.I18n.t('common.reinstall') : Em.I18n.t('admin.stackVersions.version.reinstall'));
           element.set('action', 'installRepoVersionPopup');
-          element.get('buttons').pushObject({
-            text: this.get('isVersionColumnView') ? Em.I18n.t('common.upgrade') : Em.I18n.t('admin.stackVersions.version.performUpgrade'),
-            action: 'confirmUpgrade',
-            isDisabled: isDisabled
-          });
           break;
         case 'INSTALL_FAILED':
           element.set('isButton', true);

http://git-wip-us.apache.org/repos/asf/ambari/blob/11707ba5/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
index 854e734..5a8f135 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
@@ -624,12 +624,7 @@ describe('App.UpgradeVersionBoxView', function () {
         },
         expected: {
           status: 'OUT_OF_SYNC',
-          isButtonGroup: true,
-          buttons: [{
-            text: Em.I18n.t('admin.stackVersions.version.performUpgrade'),
-            action: 'confirmUpgrade',
-            isDisabled: false
-          }],
+          isButton: true,
           text: Em.I18n.t('admin.stackVersions.version.reinstall'),
           action: 'installRepoVersionPopup',
           isDisabled: false
@@ -657,12 +652,7 @@ describe('App.UpgradeVersionBoxView', function () {
         },
         expected: {
           status: 'OUT_OF_SYNC',
-          isButtonGroup: true,
-          buttons: [{
-            text: Em.I18n.t('admin.stackVersions.version.performUpgrade'),
-            action: 'confirmUpgrade',
-            isDisabled: true
-          }],
+          isButton: true,
           text: Em.I18n.t('admin.stackVersions.version.reinstall'),
           action: 'installRepoVersionPopup',
           isDisabled: true
@@ -1399,14 +1389,8 @@ describe('App.UpgradeVersionBoxView', function () {
       });
       view.processPreUpgradeState(element);
       expect(JSON.stringify(element)).to.be.equal(JSON.stringify(Em.Object.create({
-        "buttons": [
-          {
-            "text": Em.I18n.t('admin.stackVersions.version.performUpgrade'),
-            "action": "confirmUpgrade",
-            "isDisabled": false
-          }
-        ],
-        "isButtonGroup": true,
+        "buttons": [],
+        "isButton": true,
         "text": Em.I18n.t('admin.stackVersions.version.reinstall'),
         "action": 'installRepoVersionPopup',
         "isDisabled": false


[15/50] [abbrv] ambari git commit: AMBARI-22128 Log Search UI: add event bars for Components list item. (ababiichuk)

Posted by jl...@apache.org.
AMBARI-22128 Log Search UI: add event bars for Components list item. (ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/15cd1c59
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/15cd1c59
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/15cd1c59

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 15cd1c598ba1285dcde8df47f72647f5c7fc29e7
Parents: 67396ba
Author: ababiichuk <ab...@hortonworks.com>
Authored: Wed Oct 4 14:08:37 2017 +0300
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Wed Oct 4 14:08:37 2017 +0300

----------------------------------------------------------------------
 .../filters-panel/filters-panel.component.html  |  3 +-
 .../src/app/models/node.model.ts                |  3 +-
 .../src/app/models/store.model.ts               |  2 +-
 .../app/services/component-generator.service.ts | 23 ++++++++-
 .../src/app/services/filtering.service.spec.ts  | 27 ++++++++++
 .../src/app/services/filtering.service.ts       | 53 +++++++++++++-------
 .../src/app/services/http-client.service.ts     |  2 +-
 7 files changed, 90 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/15cd1c59/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.html
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.html b/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.html
index fc3dac8..e0db997 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.html
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.html
@@ -39,7 +39,8 @@
                    additionalLabelComponentSetter="getDataForHostsNodeBar"></filter-button>
     <filter-button formControlName="components" label="{{filters.components.label | translate}}"
                    [iconClass]="filters.components.iconClass" [subItems]="filters.components.options"
-                   [isMultipleChoice]="true" [isRightAlign]="true"></filter-button>
+                   [isMultipleChoice]="true" [isRightAlign]="true"
+                   additionalLabelComponentSetter="getDataForComponentsNodeBar"></filter-button>
     <filter-button formControlName="levels" label="{{filters.levels.label | translate}}" [iconClass]="filters.levels.iconClass"
                    [subItems]="filters.levels.options" [isMultipleChoice]="true" [isRightAlign]="true"></filter-button>
     <menu-button *ngIf="!captureSeconds" label="{{'filter.capture' | translate}}" iconClass="fa fa-caret-right"

http://git-wip-us.apache.org/repos/asf/ambari/blob/15cd1c59/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts
index 2891d142..b01421e 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts
@@ -25,5 +25,6 @@ export interface Node {
   isParent: boolean;
   isRoot: boolean;
   childs?: Node[];
-  logLevelCount: CommonEntry[];
+  logLevelCount?: CommonEntry[];
+  vNodeList?: CommonEntry[];
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/15cd1c59/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts
index a6a084f..518e7cd 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts
@@ -53,7 +53,7 @@ export interface AppStore {
   userConfigs: UserConfig[];
   filters: Filter[];
   clusters: string[];
-  components: string[];
+  components: Node[];
   serviceLogsFields: ServiceLogField[];
   auditLogsFields: AuditLogField[];
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/15cd1c59/ambari-logsearch/ambari-logsearch-web/src/app/services/component-generator.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/component-generator.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/component-generator.service.ts
index c49f40f..43755c0 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/component-generator.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/component-generator.service.ts
@@ -18,13 +18,14 @@
 
 import {Injectable, ComponentFactoryResolver, ViewContainerRef} from '@angular/core';
 import {HostsService} from '@app/services/storage/hosts.service';
+import {ComponentsService} from '@app/services/storage/components.service';
 import {LogsContainerService} from '@app/services/logs-container.service';
 import {NodeBarComponent} from '@app/components/node-bar/node-bar.component';
 
 @Injectable()
 export class ComponentGeneratorService {
 
-  constructor(private resolver: ComponentFactoryResolver, private hostsStorage: HostsService, private logsContainer: LogsContainerService) {
+  constructor(private resolver: ComponentFactoryResolver, private hostsStorage: HostsService, private componentsStorage: ComponentsService, private logsContainer: LogsContainerService) {
   }
 
   private createComponent(type: any, container: ViewContainerRef, properties?: any): void {
@@ -54,4 +55,24 @@ export class ComponentGeneratorService {
     });
   }
 
+  getDataForComponentsNodeBar(componentName: string, container: ViewContainerRef): void {
+    let data;
+    this.componentsStorage.getAll().subscribe(components => {
+      if (container && components && components.length) {
+        const selectedComponent = components.find(component => component.name === componentName);
+        data = selectedComponent.logLevelCount.map(event => {
+          return {
+            color: this.logsContainer.colors[event.name],
+            value: event.value
+          };
+        });
+        if (data.length) {
+          this.createComponent(NodeBarComponent, container, {
+            data
+          });
+        }
+      }
+    });
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/15cd1c59/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts
index e3f731e..c4db041 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts
@@ -24,6 +24,8 @@ import {ComponentsService, components} from '@app/services/storage/components.se
 import {HostsService, hosts} from '@app/services/storage/hosts.service';
 import {UtilsService} from '@app/services/utils.service';
 import {HttpClientService} from '@app/services/http-client.service';
+import {ListItem} from '@app/classes/list-item.class';
+import {Node} from '@app/models/node.model';
 
 import {FilteringService} from './filtering.service';
 
@@ -64,4 +66,29 @@ describe('FilteringService', () => {
   it('should create service', inject([FilteringService], (service: FilteringService) => {
     expect(service).toBeTruthy();
   }));
+
+  describe('#getListItemFromString()', () => {
+    it('should convert string to ListItem', inject([FilteringService], (service: FilteringService) => {
+      const getListItemFromString: (name: string) => ListItem = service['getListItemFromString'];
+      expect(getListItemFromString('customName')).toEqual({
+        label: 'customName',
+        value: 'customName'
+      });
+    }));
+  });
+
+  describe('#getListItemFromNode()', () => {
+    it('should convert Node to ListItem', inject([FilteringService], (service: FilteringService) => {
+      const getListItemFromNode: (node: Node) => ListItem = service['getListItemFromNode'];
+      expect(getListItemFromNode({
+        name: 'customName',
+        value: '1',
+        isParent: true,
+        isRoot: true
+      })).toEqual({
+        label: 'customName (1)',
+        value: 'customName'
+      });
+    }));
+  });
 });

http://git-wip-us.apache.org/repos/asf/ambari/blob/15cd1c59/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
index 0fff75d..7fe6517 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
@@ -18,12 +18,14 @@
 
 import {Injectable} from '@angular/core';
 import {FormControl, FormGroup} from '@angular/forms';
+import {Response} from '@angular/http';
 import {Subject} from 'rxjs/Subject';
 import {Observable} from 'rxjs/Observable';
 import 'rxjs/add/observable/timer';
 import 'rxjs/add/operator/takeUntil';
 import * as moment from 'moment-timezone';
 import {ListItem} from '@app/classes/list-item.class';
+import {Node} from '@app/models/node.model';
 import {AppSettingsService} from '@app/services/storage/app-settings.service';
 import {ClustersService} from '@app/services/storage/clusters.service';
 import {ComponentsService} from '@app/services/storage/components.service';
@@ -35,29 +37,41 @@ export class FilteringService {
 
   constructor(private httpClient: HttpClientService, private appSettings: AppSettingsService, private clustersStorage: ClustersService, private componentsStorage: ComponentsService, private hostsStorage: HostsService) {
     appSettings.getParameter('timeZone').subscribe(value => this.timeZone = value || this.defaultTimeZone);
-    clustersStorage.getAll().subscribe(clusters => {
-      this.filters.clusters.options = [...this.filters.clusters.options, ...clusters.map(this.getListItem)];
+    clustersStorage.getAll().subscribe((clusters: string[]): void => {
+      this.filters.clusters.options = [...this.filters.clusters.options, ...clusters.map(this.getListItemFromString)];
     });
-    componentsStorage.getAll().subscribe(components => {
-      this.filters.components.options = [...this.filters.components.options, ...components.map(this.getListItem)];
+    componentsStorage.getAll().subscribe((components: Node[]): void => {
+      this.filters.components.options = [...this.filters.components.options, ...components.map(this.getListItemFromNode)];
     });
-    hostsStorage.getAll().subscribe(hosts => {
-      this.filters.hosts.options = [...this.filters.hosts.options, ...hosts.map(host => {
-        return {
-          label: `${host.name} (${host.value})`,
-          value: host.name
-        };
-      })];
+    hostsStorage.getAll().subscribe((hosts: Node[]): void => {
+      this.filters.hosts.options = [...this.filters.hosts.options, ...hosts.map(this.getListItemFromNode)];
     });
   }
 
-  private getListItem(name: string): ListItem {
+  /**
+   * Build a dropdown list item from a string
+   * @param name {string}
+   * @returns {ListItem}
+   */
+  private getListItemFromString(name: string): ListItem {
     return {
       label: name,
       value: name
     };
   }
 
+  /**
+   * Build a dropdown list item from a Node object
+   * @param node {Node}
+   * @returns {ListItem}
+   */
+  private getListItemFromNode(node: Node): ListItem {
+    return {
+      label: `${node.name} (${node.value})`,
+      value: node.name
+    };
+  }
+
   private readonly defaultTimeZone = moment.tz.guess();
 
   private readonly paginationOptions = ['10', '25', '50', '100'];
@@ -423,7 +437,7 @@ export class FilteringService {
   }
 
   loadClusters(): void {
-    this.httpClient.get('clusters').subscribe(response => {
+    this.httpClient.get('clusters').subscribe((response: Response): void => {
       const clusterNames = response.json();
       if (clusterNames) {
         this.clustersStorage.addInstances(clusterNames);
@@ -432,18 +446,21 @@ export class FilteringService {
   }
 
   loadComponents(): void {
-    this.httpClient.get('components').subscribe(response => {
+    this.httpClient.get('components').subscribe((response: Response): void => {
       const jsonResponse = response.json(),
-        components = jsonResponse && jsonResponse.groupList;
+        components = jsonResponse && jsonResponse.vNodeList.map((item): Node => Object.assign(item, {
+            value: item.logLevelCount.reduce((currentValue: number, currentItem): number => {
+              return currentValue + Number(currentItem.value);
+            }, 0)
+          }));
       if (components) {
-        const componentNames = components.map(component => component.type);
-        this.componentsStorage.addInstances(componentNames);
+        this.componentsStorage.addInstances(components);
       }
     });
   }
 
   loadHosts(): void {
-    this.httpClient.get('hosts').subscribe(response => {
+    this.httpClient.get('hosts').subscribe((response: Response): void => {
       const jsonResponse = response.json(),
         hosts = jsonResponse && jsonResponse.vNodeList;
       if (hosts) {

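For reference, the aggregation that loadComponents now applies to each vNodeList entry can be sketched in a few lines of Python; the sample payload and helper name below are illustrative, not part of the patch:

    # Collapse a component's per-level counts into a single total, mirroring
    # the reduce() added to loadComponents above.
    def to_node(item):
        item['value'] = sum(int(level['value']) for level in item['logLevelCount'])
        return item

    response = {'vNodeList': [{'name': 'ambari_agent', 'logLevelCount': [
        {'name': 'ERROR', 'value': '2'}, {'name': 'WARN', 'value': '13'}]}]}
    components = [to_node(item) for item in response['vNodeList']]
    print(components[0]['value'])  # 15
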
http://git-wip-us.apache.org/repos/asf/ambari/blob/15cd1c59/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts
index 44a5f6a..495f706 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts
@@ -62,7 +62,7 @@ export class HttpClientService extends Http {
       params: opts => new ServiceLogsTruncatedQueryParams(opts)
     },
     components: {
-      url: 'service/logs/components'
+      url: 'service/logs/components/levels/counts'
     },
     clusters: {
       url: 'service/logs/clusters'


[25/50] [abbrv] ambari git commit: AMBARI-22122. ambari server silent setup for embedded database option fails sometimes. (mpapirkovskyy)

Posted by jl...@apache.org.
AMBARI-22122. ambari server silent setup for embedded database option fails sometimes. (mpapirkovskyy)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fc58250f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fc58250f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fc58250f

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: fc58250f7b0b551e0077a047a79813ea3f9bd4e1
Parents: 1da7735
Author: Myroslav Papirkovskyi <mp...@hortonworks.com>
Authored: Wed Oct 4 23:26:18 2017 +0300
Committer: Myroslav Papirkovskyi <mp...@hortonworks.com>
Committed: Wed Oct 4 23:36:11 2017 +0300

----------------------------------------------------------------------
 .../src/main/python/ambari_server/dbConfiguration_linux.py  | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fc58250f/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
index 38dfa8c..2ac5325 100644
--- a/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
+++ b/ambari-server/src/main/python/ambari_server/dbConfiguration_linux.py
@@ -354,6 +354,7 @@ class PGConfig(LinuxDBMSConfig):
 
   PG_ERROR_BLOCKED = "is being accessed by other users"
   PG_STATUS_RUNNING = None
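+  # Named constant for the "stopped" status, replacing the scattered string literals below.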
+  PG_STATUS_STOPPED = "stopped"
   SERVICE_CMD = "/usr/bin/env service"
   PG_SERVICE_NAME = "postgresql"
   PG_HBA_DIR = None
@@ -611,12 +612,12 @@ class PGConfig(LinuxDBMSConfig):
     # on RHEL and SUSE PG_ST_COMD returns RC 0 for running and 3 for stopped
     if retcode == 0:
       if out.strip() == "Running clusters:":
-        pg_status = "stopped"
+        pg_status = PGConfig.PG_STATUS_STOPPED
       else:
         pg_status = PGConfig.PG_STATUS_RUNNING
     else:
       if retcode == 3:
-        pg_status = "stopped"
+        pg_status = PGConfig.PG_STATUS_STOPPED
       else:
         pg_status = None
     return pg_status, retcode, out, err
@@ -750,7 +751,7 @@ class PGConfig(LinuxDBMSConfig):
     PGConfig._configure_postgresql_conf()
     #restart postgresql if already running
     pg_status, retcode, out, err = PGConfig._get_postgre_status()
-    if pg_status == PGConfig.PG_STATUS_RUNNING:
+    if pg_status != PGConfig.PG_STATUS_STOPPED:
       retcode, out, err = PGConfig._restart_postgres()
       return retcode, out, err
     return 0, "", ""
@@ -770,7 +771,7 @@ class PGConfig(LinuxDBMSConfig):
       process.kill()
       pg_status, retcode, out, err = PGConfig._get_postgre_status()
       # SUSE linux set status of stopped postgresql proc to unused
-      if pg_status == "unused" or pg_status == "stopped":
+      if pg_status == "unused" or pg_status == PGConfig.PG_STATUS_STOPPED:
         print_info_msg("PostgreSQL is stopped. Restarting ...")
         retcode, out, err = run_os_command(PGConfig.PG_START_CMD)
         return retcode, out, err

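The behavioural change is easiest to see as a small decision table. A minimal Python sketch of the new logic (illustrative only, not code from the patch):

    PG_STATUS_STOPPED = "stopped"

    def should_restart(pg_status):
        # The old check restarted only when the status matched the distro's exact
        # "running" value; any other report skipped the restart. The new check
        # restarts unless PostgreSQL is known to be stopped.
        return pg_status != PG_STATUS_STOPPED

    for status in ("running", "unused", None, "stopped"):
        print("%r -> restart: %s" % (status, should_restart(status)))
    # Only "stopped" suppresses the restart now.
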

[46/50] [abbrv] ambari git commit: AMBARI-22147 Move stacks/HDP/3.0/kerberos.json to stacks/kerberos.json (dsen)

Posted by jl...@apache.org.
AMBARI-22147 Move stacks/HDP/3.0/kerberos.json to stacks/kerberos.json (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2fb4649e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2fb4649e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2fb4649e

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 2fb4649e2a9381b98c4867ac4a550f7c3ff7ed23
Parents: 01b79aa
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 19:39:24 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 19:39:24 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     | 54 +++++++------
 .../server/controller/StackVersionResponse.java | 29 -------
 .../internal/StackArtifactResourceProvider.java | 17 +----
 .../ambari/server/stack/ServiceDirectory.java   |  3 +-
 .../ambari/server/stack/StackDirectory.java     | 27 -------
 .../apache/ambari/server/stack/StackModule.java |  6 --
 .../apache/ambari/server/state/ServiceInfo.java |  2 +-
 .../apache/ambari/server/state/StackInfo.java   | 23 ------
 ambari-server/src/main/resources/kerberos.json  | 79 ++++++++++++++++++++
 .../resources/stacks/HDP/2.0.6/kerberos.json    | 79 --------------------
 .../main/resources/stacks/HDP/3.0/kerberos.json | 79 --------------------
 .../server/api/services/AmbariMetaInfoTest.java | 19 +++--
 .../ambari/server/stack/StackManagerTest.java   |  8 --
 .../KerberosDescriptorUpdateHelperTest.java     |  1 +
 ambari-server/src/test/resources/kerberos.json  | 42 +++++++++++
 .../resources/stacks/HDP/2.0.8/kerberos.json    | 42 -----------
 16 files changed, 170 insertions(+), 340 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 425d247..46ee65a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,7 +21,6 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
-import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -106,6 +105,17 @@ public class AmbariMetaInfo {
    * Version of XML files with support of custom services and custom commands
    */
   public static final String SCHEMA_VERSION_2 = "2.0";
+
+  /**
+   * The filename for a Kerberos descriptor file at either the stack or service level
+   */
+  public static final String KERBEROS_DESCRIPTOR_FILE_NAME = "kerberos.json";
+
+  /**
+   * The filename for a Widgets descriptor file at either the stack or service level
+   */
+  public static final String WIDGETS_DESCRIPTOR_FILE_NAME = "widgets.json";
+
   private final static Logger LOG = LoggerFactory.getLogger(AmbariMetaInfo.class);
 
 
@@ -128,6 +138,7 @@ public class AmbariMetaInfo {
   private File serverVersionFile;
   private File commonWidgetsDescriptorFile;
   private File customActionRoot;
+  private String commonKerberosDescriptorFileLocation;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
 
@@ -217,6 +228,7 @@ public class AmbariMetaInfo {
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
 
+    commonKerberosDescriptorFileLocation = new File(conf.getResourceDirPath(), KERBEROS_DESCRIPTOR_FILE_NAME).getAbsolutePath();
     commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
@@ -1257,38 +1269,25 @@ public class AmbariMetaInfo {
   public KerberosDescriptor getKerberosDescriptor(String stackName, String stackVersion, boolean includePreconfigureData) throws AmbariException {
     StackInfo stackInfo = getStack(stackName, stackVersion);
 
-    KerberosDescriptor kerberosDescriptor = null;
+    KerberosDescriptor kerberosDescriptor = readKerberosDescriptorFromFile(getCommonKerberosDescriptorFileLocation());
 
+    if (kerberosDescriptor == null) {
+      LOG.warn("Couldn't read common Kerberos descriptor with path {%s}", getCommonKerberosDescriptorFileLocation());
+      kerberosDescriptor = new KerberosDescriptor();
+    }
     // Read in the stack-level Kerberos descriptor pre-configuration data
     if (includePreconfigureData) {
-      kerberosDescriptor = readKerberosDescriptorFromFile(stackInfo.getKerberosDescriptorPreConfigurationFileLocation());
+      KerberosDescriptor preConfigureKerberosDescriptor = readKerberosDescriptorFromFile(stackInfo.getKerberosDescriptorPreConfigurationFileLocation());
 
-      if (kerberosDescriptor != null) {
+      if (preConfigureKerberosDescriptor != null) {
         // Ensure that all services to be pre-configured are flagged appropriately.
-        Map<String, KerberosServiceDescriptor> serviceDescriptors = kerberosDescriptor.getServices();
+        Map<String, KerberosServiceDescriptor> serviceDescriptors = preConfigureKerberosDescriptor.getServices();
         if (serviceDescriptors != null) {
           for (KerberosServiceDescriptor serviceDescriptor : serviceDescriptors.values()) {
             serviceDescriptor.setPreconfigure(true);
           }
         }
-      }
-    }
-
-    // Read in the base stack-level Kerberos descriptor.
-    KerberosDescriptor stackKerberosDescriptor = readKerberosDescriptorFromFile(stackInfo.getKerberosDescriptorFileLocation());
-    if (stackKerberosDescriptor == null) {
-      // If kerberosDescriptor is null and stackKerberosDescriptor is null, then ensure
-      // kerberosDescriptor is an empty KerberosDescriptor.
-      if (kerberosDescriptor == null) {
-        kerberosDescriptor = new KerberosDescriptor();
-      }
-    } else {
-      if (kerberosDescriptor == null) {
-        // If kerberosDescriptor is null; then set it to stackKerberosDescriptor.
-        kerberosDescriptor = stackKerberosDescriptor;
-      } else {
-        // If kerberosDescriptor is not null; then update it using stackKerberosDescriptor.
-        kerberosDescriptor.update(stackKerberosDescriptor);
+        kerberosDescriptor.update(preConfigureKerberosDescriptor);
       }
     }
 
@@ -1314,6 +1313,15 @@ public class AmbariMetaInfo {
   }
 
   /**
+   * Gets the path to the common Kerberos descriptor file
+   *
+   * @return a String containing the path to the common Kerberos descriptor file
+   */
+  protected String getCommonKerberosDescriptorFileLocation() {
+    return commonKerberosDescriptorFileLocation;
+  }
+
+  /**
    * Gets the requested service-level Kerberos descriptor(s)
    * <p/>
    * An array of descriptors is returned since the kerberos.json in a service directory may contain

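In outline, descriptor resolution now starts from the common kerberos.json and layers the stack's pre-configuration data on top. A minimal Python sketch of that order, with a read helper as a stand-in for readKerberosDescriptorFromFile() (the real code goes through KerberosDescriptor.update(), which merges recursively):

    import json, os

    def read_descriptor(path):
        # Illustrative stand-in for readKerberosDescriptorFromFile().
        if path and os.path.isfile(path):
            with open(path) as fp:
                return json.load(fp)
        return None

    def get_kerberos_descriptor(common_path, preconfigure_path, include_preconfigure):
        # The common descriptor is now the base; fall back to an empty one.
        descriptor = read_descriptor(common_path) or {}
        if include_preconfigure:
            pre = read_descriptor(preconfigure_path)
            if pre:
                # Flag every service coming from the pre-configuration data.
                for service in pre.get('services', []):
                    service['preconfigure'] = True
                # Shallow overlay; the real update() merges descriptors recursively.
                descriptor.update(pre)
        return descriptor
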
http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java
index 8fa0bf9..38b32d2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/StackVersionResponse.java
@@ -42,13 +42,6 @@ public class StackVersionResponse implements Validable{
   private Map<String, Map<String, Map<String, String>>> configTypes;
 
   /**
-   * A File pointing to the stack-level Kerberos descriptor file
-   *
-   * This may be null if a relevant file is not available.
-   */
-  private File stackKerberosDescriptorFile;
-
-  /**
    * A Collection of Files pointing to the service-level Kerberos descriptor files
    *
    * This may be null or empty if no relevant files are available.
@@ -59,7 +52,6 @@ public class StackVersionResponse implements Validable{
   public StackVersionResponse(String stackVersion, String minUpgradeVersion,
                               boolean active, String parentVersion,
                               Map<String, Map<String, Map<String, String>>> configTypes,
-                              File stackKerberosDescriptorFile,
                               Collection<File> serviceKerberosDescriptorFiles,
                               Set<String> upgradePacks, boolean valid, Collection<String> errorSet, String minJdk, String maxJdk) {
     setStackVersion(stackVersion);
@@ -67,7 +59,6 @@ public class StackVersionResponse implements Validable{
     setActive(active);
     setParentVersion(parentVersion);
     setConfigTypes(configTypes);
-    setKerberosDescriptorFile(stackKerberosDescriptorFile);
     setServiceKerberosDescriptorFiles(serviceKerberosDescriptorFiles);
     setUpgradePacks(upgradePacks);
     setValid(valid);
@@ -176,26 +167,6 @@ public class StackVersionResponse implements Validable{
   }
 
   /**
-   * Gets a File pointing to the stack-level Kerberos descriptor
-   *
-   * @return a File pointing to the stack-level Kerberos descriptor, or null if no relevant file is
-   * available
-   */
-  @ApiModelProperty(hidden = true)
-  public File getStackKerberosDescriptorFile() {
-    return stackKerberosDescriptorFile;
-  }
-
-  /**
-   * Sets the stack-level Kerberos descriptor File
-   *
-   * @param stackKerberosDescriptorFile a File pointing to the stack-level Kerberos descriptor
-   */
-  public void setKerberosDescriptorFile(File stackKerberosDescriptorFile) {
-    this.stackKerberosDescriptorFile = stackKerberosDescriptorFile;
-  }
-
-  /**
    * Gets the Collection of Files pointing to the stack-specific service-level Kerberos descriptor
    * files
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index a7f7710..6bb421b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -483,7 +483,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
   private Map<String, Object> buildStackDescriptor(String stackName, String stackVersion)
       throws NoSuchParentResourceException, IOException {
 
-    KerberosDescriptor kerberosDescriptor = null;
+    KerberosDescriptor kerberosDescriptor = new KerberosDescriptor();
 
     AmbariManagementController controller = getManagementController();
     StackInfo stackInfo;
@@ -496,19 +496,8 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
 
     Collection<KerberosServiceDescriptor> serviceDescriptors = getServiceDescriptors(stackInfo);
 
-    String kerberosFileLocation = stackInfo.getKerberosDescriptorFileLocation();
-    if (kerberosFileLocation != null) {
-      kerberosDescriptor = kerberosDescriptorFactory.createInstance(new File(kerberosFileLocation));
-    } else if (! serviceDescriptors.isEmpty()) {
-      // service descriptors present with no stack descriptor,
-      // create an empty stack descriptor to hold services
-      kerberosDescriptor = new KerberosDescriptor();
-    }
-
-    if (kerberosDescriptor != null) {
-      for (KerberosServiceDescriptor descriptor : serviceDescriptors) {
-        kerberosDescriptor.putService(descriptor);
-      }
+    if (serviceDescriptors != null) {
+      serviceDescriptors.forEach(kerberosDescriptor::putService);
       return kerberosDescriptor.toMap();
     } else {
       return null;

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
index ae59b3f..119163e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
@@ -24,6 +24,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
 import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
@@ -259,7 +260,7 @@ public abstract class ServiceDirectory extends StackDefinitionDirectory {
     File af = new File(directory, StackDirectory.SERVICE_ALERT_FILE_NAME);
     alertsFile = af.exists() ? af : null;
 
-    File kdf = new File(directory, StackDirectory.KERBEROS_DESCRIPTOR_FILE_NAME);
+    File kdf = new File(directory, AmbariMetaInfo.KERBEROS_DESCRIPTOR_FILE_NAME);
     kerberosDescriptorFile = kdf.exists() ? kdf : null;
 
     File rco = new File(directory, StackDirectory.RCO_FILE_NAME);

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index e3c586b..daf8e7c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -57,14 +57,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   public static final String SERVICE_ALERT_FILE_NAME = "alerts.json";
   public static final String SERVICE_ADVISOR_FILE_NAME = "service_advisor.py";
   /**
-   * The filename for a Kerberos descriptor file at either the stack or service level
-   */
-  public static final String KERBEROS_DESCRIPTOR_FILE_NAME = "kerberos.json";
-  /**
-   * The filename for a Widgets descriptor file at either the stack or service level
-   */
-  public static final String WIDGETS_DESCRIPTOR_FILE_NAME = "widgets.json";
-  /**
    * The filename for a Kerberos descriptor preconfigure file at either the stack or service level
    */
   public static final String KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME = "kerberos_preconfigure.json";
@@ -84,11 +76,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String rcoFilePath;
 
   /**
-   * kerberos descriptor file path
-   */
-  private String kerberosDescriptorFilePath;
-
-  /**
    * kerberos descriptor (preconfigure) file path
    */
   private String kerberosDescriptorPreconfigureFilePath;
@@ -210,15 +197,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
-   * Obtain the path to the (stack-level) Kerberos descriptor file
-   *
-   * @return the path to the (stack-level) Kerberos descriptor file
-   */
-  public String getKerberosDescriptorFilePath() {
-    return kerberosDescriptorFilePath;
-  }
-
-  /**
    * Obtain the path to the (stack-level) Kerberos descriptor pre-configuration file
    *
    * @return the path to the (stack-level) Kerberos descriptor pre-configuration file
@@ -300,11 +278,6 @@ public class StackDirectory extends StackDefinitionDirectory {
       rcoFilePath = getAbsolutePath() + File.separator + RCO_FILE_NAME;
     }
 
-    if (subDirs.contains(KERBEROS_DESCRIPTOR_FILE_NAME)) {
-      // kerberosDescriptorFilePath is expected to be absolute
-      kerberosDescriptorFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_FILE_NAME;
-    }
-
     if (subDirs.contains(KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME)) {
       // kerberosDescriptorPreconfigureFilePath is expected to be absolute
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 71235f3..e88bbf2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -284,11 +284,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     mergeConfigurations(parentStack, allStacks, commonServices, extensions);
     mergeRoleCommandOrder(parentStack);
 
-    // grab stack level kerberos.json from parent stack
-    if (stackInfo.getKerberosDescriptorFileLocation() == null) {
-      stackInfo.setKerberosDescriptorFileLocation(parentStack.getModuleInfo().getKerberosDescriptorFileLocation());
-    }
-
     // grab stack level kerberos_preconfigure.json from parent stack
     if (stackInfo.getKerberosDescriptorPreConfigurationFileLocation() == null) {
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
@@ -567,7 +562,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setActive(smx.getVersion().isActive());
       stackInfo.setParentStackVersion(smx.getExtends());
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
-      stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
index ef18bd9..8fe6583 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
@@ -134,7 +134,7 @@ public class ServiceInfo implements Validable{
   private String parent;
 
   @XmlElement(name = "widgetsFileName")
-  private String widgetsFileName = StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
+  private String widgetsFileName = AmbariMetaInfo.WIDGETS_DESCRIPTOR_FILE_NAME;
 
   @XmlElement(name = "metricsFileName")
   private String metricsFileName = StackDirectory.SERVICE_METRIC_FILE_NAME;

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index 3efc997..c32e907 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -325,9 +325,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
 
   public StackVersionResponse convertToResponse() {
 
-    // Get the stack-level Kerberos descriptor file path
-    String stackDescriptorFileFilePath = getKerberosDescriptorFileLocation();
-
     // Collect the services' Kerberos descriptor files
     Collection<ServiceInfo> serviceInfos = getServices();
     // The collection of service descriptor files. A Set is being used because some Kerberos descriptor
@@ -345,7 +342,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
 
     return new StackVersionResponse(getVersion(), getMinUpgradeVersion(),
         isActive(), getParentStackVersion(), getConfigTypeAttributes(),
-        (stackDescriptorFileFilePath == null) ? null : new File(stackDescriptorFileFilePath),
         serviceDescriptorFiles,
         null == upgradePacks ? Collections.emptySet() : upgradePacks.keySet(),
         isValid(), getErrors(), getMinJdk(), getMaxJdk());
@@ -392,25 +388,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
   }
 
   /**
-   * Gets the path to the stack-level Kerberos descriptor file
-   *
-   * @return a String containing the path to the stack-level Kerberos descriptor file
-   */
-  public String getKerberosDescriptorFileLocation() {
-    return kerberosDescriptorFileLocation;
-  }
-
-  /**
-   * Sets the path to the stack-level Kerberos descriptor file
-   *
-   * @param kerberosDescriptorFileLocation a String containing the path to the stack-level Kerberos
-   *                                       descriptor file
-   */
-  public void setKerberosDescriptorFileLocation(String kerberosDescriptorFileLocation) {
-    this.kerberosDescriptorFileLocation = kerberosDescriptorFileLocation;
-  }
-
-  /**
    * Gets the path to the stack-level Kerberos descriptor pre-configuration file
    *
    * @return a String containing the path to the stack-level Kerberos descriptor pre-configuration file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/resources/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/kerberos.json b/ambari-server/src/main/resources/kerberos.json
new file mode 100644
index 0000000..3787db1
--- /dev/null
+++ b/ambari-server/src/main/resources/kerberos.json
@@ -0,0 +1,79 @@
+{
+  "properties": {
+    "realm": "${kerberos-env/realm}",
+    "keytab_dir": "/etc/security/keytabs",
+    "additional_realms": "",
+    "principal_suffix": "-${cluster_name|toLower()}"
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type": "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    },
+    {
+      "name": "smokeuser",
+      "principal": {
+        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}",
+        "type": "user",
+        "configuration": "cluster-env/smokeuser_principal_name",
+        "local_username": "${cluster-env/smokeuser}"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/smokeuser.headless.keytab",
+        "owner": {
+          "name": "${cluster-env/smokeuser}",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        },
+        "configuration": "cluster-env/smokeuser_keytab"
+      }
+    }
+  ],
+  "services": [
+    {
+      "name": "AMBARI",
+      "components": [
+        {
+          "name": "AMBARI_SERVER",
+          "identities": [
+            {
+              "name": "ambari-server",
+              "principal": {
+                "value": "ambari-server${principal_suffix}@${realm}",
+                "type": "user",
+                "configuration": "cluster-env/ambari_principal_name"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/ambari.server.keytab",
+                "owner": {
+                  "access": "r"
+                }
+              }
+            },
+            {
+              "name" : "ambari-server_spnego",
+              "reference" : "/spnego"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/resources/stacks/HDP/2.0.6/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/kerberos.json
deleted file mode 100644
index 3787db1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/kerberos.json
+++ /dev/null
@@ -1,79 +0,0 @@
-{
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs",
-    "additional_realms": "",
-    "principal_suffix": "-${cluster_name|toLower()}"
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type": "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    },
-    {
-      "name": "smokeuser",
-      "principal": {
-        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}",
-        "type": "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username": "${cluster-env/smokeuser}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    }
-  ],
-  "services": [
-    {
-      "name": "AMBARI",
-      "components": [
-        {
-          "name": "AMBARI_SERVER",
-          "identities": [
-            {
-              "name": "ambari-server",
-              "principal": {
-                "value": "ambari-server${principal_suffix}@${realm}",
-                "type": "user",
-                "configuration": "cluster-env/ambari_principal_name"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ambari.server.keytab",
-                "owner": {
-                  "access": "r"
-                }
-              }
-            },
-            {
-              "name" : "ambari-server_spnego",
-              "reference" : "/spnego"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
deleted file mode 100644
index 3787db1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/kerberos.json
+++ /dev/null
@@ -1,79 +0,0 @@
-{
-  "properties": {
-    "realm": "${kerberos-env/realm}",
-    "keytab_dir": "/etc/security/keytabs",
-    "additional_realms": "",
-    "principal_suffix": "-${cluster_name|toLower()}"
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type": "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    },
-    {
-      "name": "smokeuser",
-      "principal": {
-        "value": "${cluster-env/smokeuser}${principal_suffix}@${realm}",
-        "type": "user",
-        "configuration": "cluster-env/smokeuser_principal_name",
-        "local_username": "${cluster-env/smokeuser}"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/smokeuser.headless.keytab",
-        "owner": {
-          "name": "${cluster-env/smokeuser}",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        },
-        "configuration": "cluster-env/smokeuser_keytab"
-      }
-    }
-  ],
-  "services": [
-    {
-      "name": "AMBARI",
-      "components": [
-        {
-          "name": "AMBARI_SERVER",
-          "identities": [
-            {
-              "name": "ambari-server",
-              "principal": {
-                "value": "ambari-server${principal_suffix}@${realm}",
-                "type": "user",
-                "configuration": "cluster-env/ambari_principal_name"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ambari.server.keytab",
-                "owner": {
-                  "access": "r"
-                }
-              }
-            },
-            {
-              "name" : "ambari-server_spnego",
-              "reference" : "/spnego"
-            }
-          ]
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 25e8d04..9285526 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -151,11 +151,12 @@ public class AmbariMetaInfoTest {
   public static void beforeClass() throws Exception {
     File stacks = new File("src/test/resources/stacks");
     File version = new File("src/test/resources/version");
+    File resourcesRoot = new File("src/test/resources/");
     if (System.getProperty("os.name").contains("Windows")) {
       stacks = new File(ClassLoader.getSystemClassLoader().getResource("stacks").getPath());
       version = new File(new File(ClassLoader.getSystemClassLoader().getResource("").getPath()).getParent(), "version");
     }
-    metaInfo = createAmbariMetaInfo(stacks, version);
+    metaInfo = createAmbariMetaInfo(stacks, version, resourcesRoot);
   }
 
   @AfterClass
@@ -450,7 +451,7 @@ public class AmbariMetaInfoTest {
       f3.createNewFile();
     }
 
-    AmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRootTmp, version);
+    AmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRootTmp, version, new File(""));
 
     // Tests the stack is loaded as expected
     getServices();
@@ -737,7 +738,7 @@ public class AmbariMetaInfoTest {
     LOG.info("Stacks file " + stackRoot.getAbsolutePath());
 
 
-    TestAmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRoot, version);
+    TestAmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRoot, version, new File(""));
     Assert.assertEquals(1, ambariMetaInfo.getStackManager().getStacks().size());
     Assert.assertEquals(false, ambariMetaInfo.getStackManager().getStack("HDP", "0.1").isValid());
     Assert.assertEquals(2, ambariMetaInfo.getStackManager().getStack("HDP", "0.1").getErrors().size());
@@ -1820,8 +1821,7 @@ public class AmbariMetaInfoTest {
 
   @Test
   public void testReadKerberosDescriptorFromFile() throws AmbariException {
-    StackInfo stackInfo = metaInfo.getStack(STACK_NAME_HDP, "2.0.8");
-    String path = stackInfo.getKerberosDescriptorFileLocation();
+    String path = metaInfo.getCommonKerberosDescriptorFileLocation();
     KerberosDescriptor descriptor = metaInfo.readKerberosDescriptorFromFile(path);
 
     Assert.assertNotNull(descriptor);
@@ -1899,7 +1899,7 @@ public class AmbariMetaInfoTest {
     File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
 
     Assert.assertNotNull(widgetsFile);
-    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
+    Assert.assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
   }
 
   private File getStackRootTmp(String buildDir) {
@@ -1937,16 +1937,17 @@ public class AmbariMetaInfoTest {
   private TestAmbariMetaInfo setupTempAmbariMetaInfoExistingDirs(String buildDir) throws Exception {
     File version = getVersion();
     File stackRootTmp = getStackRootTmp(buildDir);
-    TestAmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRootTmp, version);
+    TestAmbariMetaInfo ambariMetaInfo = createAmbariMetaInfo(stackRootTmp, version, new File(""));
     return ambariMetaInfo;
   }
 
   private static TestAmbariMetaInfo createAmbariMetaInfo(File stackRoot,
-    File versionFile) throws Exception {
+    File versionFile, File resourcesRoot) throws Exception {
 
     Properties properties = new Properties();
     properties.setProperty(Configuration.METADATA_DIR_PATH.getKey(), stackRoot.getPath());
     properties.setProperty(Configuration.SERVER_VERSION_FILE.getKey(), versionFile.getPath());
+    properties.setProperty(Configuration.RESOURCES_DIR.getKey(), resourcesRoot.getPath());
     Configuration configuration = new Configuration(properties);
 
     TestAmbariMetaInfo metaInfo = new TestAmbariMetaInfo(configuration);
@@ -2056,9 +2057,11 @@ public class AmbariMetaInfoTest {
       Configuration config = createNiceMock(Configuration.class);
       if (System.getProperty("os.name").contains("Windows")) {
         expect(config.getSharedResourcesDirPath()).andReturn(ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
+        expect(config.getResourceDirPath()).andReturn(ClassLoader.getSystemClassLoader().getResource("").getPath()).anyTimes();
       }
       else {
         expect(config.getSharedResourcesDirPath()).andReturn("./src/test/resources").anyTimes();
+        expect(config.getResourceDirPath()).andReturn("./src/test/resources").anyTimes();
       }
 
       replay(config);

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index f53e111..b8f513c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -723,14 +723,6 @@ public class StackManagerTest {
 
   }
 
-  @Test
-  public void testInheritKerberosDescriptor() throws Exception {
-    StackInfo stack = stackManager.getStack("HDP", "2.1.1");
-    String stacksFolder = ClassLoader.getSystemClassLoader().getResource("stacks").getPath();
-    assertEquals(new File(stacksFolder, "HDP/2.0.8/kerberos.json").getAbsolutePath(),
-        stack.getKerberosDescriptorFileLocation());
-  }
-
   /**
    * Tests that {@link UpgradePack} and {@link ConfigUpgradePack} instances are correctly initialized
    * post-unmarshalling.

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
index 293e050..a3d13b1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
@@ -66,6 +66,7 @@ public class KerberosDescriptorUpdateHelperTest extends EasyMockSupport {
         properties.put("common.services.path", "src/main/resources/common-services");
         properties.put("server.version.file", "target/version");
         properties.put("custom.action.definitions", "/tmp/nofile");
+        properties.put("resources.dir", "src/main/resources");
         Configuration configuration = new Configuration(properties);
 
         install(new FactoryModuleBuilder().build(StackManagerFactory.class));

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/resources/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos.json b/ambari-server/src/test/resources/kerberos.json
new file mode 100644
index 0000000..14eefbf
--- /dev/null
+++ b/ambari-server/src/test/resources/kerberos.json
@@ -0,0 +1,42 @@
+{
+  "properties": {
+    "realm": "${cluster-env/kerberos_domain}",
+    "keytab_dir": "/etc/security/keytabs",
+    "additional_realms": ""
+  },
+  "identities": [
+    {
+      "name": "spnego",
+      "principal": {
+        "value": "HTTP/_HOST@${realm}",
+        "type" : "service"
+      },
+      "keytab": {
+        "file": "${keytab_dir}/spnego.service.keytab",
+        "owner": {
+          "name": "root",
+          "access": "r"
+        },
+        "group": {
+          "name": "${cluster-env/user_group}",
+          "access": "r"
+        }
+      }
+    }
+  ],
+  "auth_to_local_properties" : [
+    "hadoop.security.auth_to_local"
+  ],
+  "configurations": [
+    {
+      "core-site": {
+        "hadoop.security.authentication": "kerberos",
+        "hadoop.rpc.protection": "authentication",
+        "hadoop.security.authorization": "true",
+        "hadoop.security.auth_to_local": "",
+        "hadoop.proxyuser.superuser.hosts": "",
+        "hadoop.proxyuser.superuser.groups": ""
+      }
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/2fb4649e/ambari-server/src/test/resources/stacks/HDP/2.0.8/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.8/kerberos.json b/ambari-server/src/test/resources/stacks/HDP/2.0.8/kerberos.json
deleted file mode 100644
index 14eefbf..0000000
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.8/kerberos.json
+++ /dev/null
@@ -1,42 +0,0 @@
-{
-  "properties": {
-    "realm": "${cluster-env/kerberos_domain}",
-    "keytab_dir": "/etc/security/keytabs",
-    "additional_realms": ""
-  },
-  "identities": [
-    {
-      "name": "spnego",
-      "principal": {
-        "value": "HTTP/_HOST@${realm}",
-        "type" : "service"
-      },
-      "keytab": {
-        "file": "${keytab_dir}/spnego.service.keytab",
-        "owner": {
-          "name": "root",
-          "access": "r"
-        },
-        "group": {
-          "name": "${cluster-env/user_group}",
-          "access": "r"
-        }
-      }
-    }
-  ],
-  "auth_to_local_properties" : [
-    "hadoop.security.auth_to_local"
-  ],
-  "configurations": [
-    {
-      "core-site": {
-        "hadoop.security.authentication": "kerberos",
-        "hadoop.rpc.protection": "authentication",
-        "hadoop.security.authorization": "true",
-        "hadoop.security.auth_to_local": "",
-        "hadoop.proxyuser.superuser.hosts": "",
-        "hadoop.proxyuser.superuser.groups": ""
-      }
-    }
-  ]
-}


[34/50] [abbrv] ambari git commit: AMBARI-22124. Refactor AMS logic in stack advisors to service advisors. (vbrodetskyi)

Posted by jl...@apache.org.
AMBARI-22124. Refactor AMS logic in stack advisors to service advisors. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0f32765d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0f32765d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0f32765d

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 0f32765dc2250044c7925f4e68e6f61b7a77d8f8
Parents: 9adfcdc
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Fri Oct 6 10:40:33 2017 +0300
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Fri Oct 6 10:40:33 2017 +0300

----------------------------------------------------------------------
 .../AMBARI_METRICS/0.1.0/service_advisor.py     | 787 +++++++++++++++++++
 .../ATLAS/0.7.0.3.0/service_advisor.py          |   5 +-
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 542 +------------
 .../stacks/HDP/2.2/services/stack_advisor.py    |   1 -
 .../AMBARI_METRICS/test_service_advisor.py      | 596 ++++++++++++++
 .../stacks/2.0.6/common/test_stack_advisor.py   | 576 --------------
 .../stacks/2.2/common/test_stack_advisor.py     | 511 ------------
 7 files changed, 1388 insertions(+), 1630 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
new file mode 100644
index 0000000..eae98bf
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/service_advisor.py
@@ -0,0 +1,787 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import re
+import os
+import sys
+import socket
+import traceback
+from math import ceil, floor, log
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+#split points
+metricsDir = os.path.join(SCRIPT_DIR, 'package')
+print "METRICS_DIR=>" + str(metricsDir)
+serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
+customServiceMetricsDir = os.path.join(SCRIPT_DIR, '../../../dashboards/service-metrics')
+sys.path.append(os.path.join(metricsDir, 'scripts'))
+
+from split_points import FindSplitPointsForAMSRegions
+
+try:
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class AMBARI_METRICSServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AMBARI_METRICSServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overriden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    min_val = 1
+
+    self.cardinalitiesDict.update(
+      {
+        'METRICS_COLLECTOR': {"min": min_val}
+      }
+    )
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    self.heap_size_properties = {"METRICS_COLLECTOR":
+                                   [{"config-name": "ams-hbase-env",
+                                     "property": "hbase_master_heapsize",
+                                     "default": "1024m"},
+                                    {"config-name": "ams-hbase-env",
+                                     "property": "hbase_regionserver_heapsize",
+                                     "default": "1024m"},
+                                    {"config-name": "ams-env",
+                                     "property": "metrics_collector_heapsize",
+                                     "default": "512m"}]}
+
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    self.notValuableComponents |= set(['METRICS_MONITOR'])
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    self.notPreferableOnServerComponents |= set(['METRICS_COLLECTOR'])
+
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary maps a cluster-size threshold (number of hosts)
+    to the host index where the component should be placed.
+    Must be overridden in child class.
+    """
+    self.componentLayoutSchemes.update({'METRICS_COLLECTOR': {3: 2, 6: 2, 31: 3, "else": 5}})
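+    # Illustrative reading of the scheme above, assuming the default stack
+    # advisor picks the first numeric key greater than the current host count
+    # (and caps the chosen index at the number of hosts):
+    #   2 hosts  (< 3)  -> host index 2 (capped)
+    #   5 hosts  (< 6)  -> host index 2
+    #   20 hosts (< 31) -> host index 3
+    #   larger clusters -> host index 5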
+
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return []
+
+  def getAmsMemoryRecommendation(self, services, hosts):
+    # MB per sink in hbase heapsize
+    HEAP_PER_MASTER_COMPONENT = 50
+    HEAP_PER_SLAVE_COMPONENT = 10
+
+    schMemoryMap = {
+      "HDFS": {
+        "NAMENODE": HEAP_PER_MASTER_COMPONENT,
+        "SECONDARY_NAMENODE": HEAP_PER_MASTER_COMPONENT,
+        "DATANODE": HEAP_PER_SLAVE_COMPONENT
+      },
+      "YARN": {
+        "RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
+        "NODEMANAGER": HEAP_PER_SLAVE_COMPONENT,
+        "HISTORYSERVER" : HEAP_PER_MASTER_COMPONENT,
+        "APP_TIMELINE_SERVER": HEAP_PER_MASTER_COMPONENT
+      },
+      "HBASE": {
+        "HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "HIVE": {
+        "HIVE_METASTORE": HEAP_PER_MASTER_COMPONENT,
+        "HIVE_SERVER": HEAP_PER_MASTER_COMPONENT
+      },
+      "KAFKA": {
+        "KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
+      },
+      "FLUME": {
+        "FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "STORM": {
+        "NIMBUS": HEAP_PER_MASTER_COMPONENT,
+      },
+      "AMBARI_METRICS": {
+        "METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
+        "METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
+      },
+      "ACCUMULO": {
+        "ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
+        "ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
+      },
+      "LOGSEARCH": {
+        "LOGSEARCH_LOGFEEDER" : HEAP_PER_SLAVE_COMPONENT
+      }
+    }
+    total_sinks_count = 0
+    # minimum heap size
+    hbase_heapsize = 500
+    for serviceName, componentsDict in schMemoryMap.items():
+      for componentName, multiplier in componentsDict.items():
+        schCount = len(
+          self.getHostsWithComponent(serviceName, componentName, services,
+                                     hosts))
+        hbase_heapsize += int((schCount * multiplier))
+        total_sinks_count += schCount
+    collector_heapsize = int(hbase_heapsize/3 if hbase_heapsize > 2048 else 512)
+    hbase_heapsize = min(hbase_heapsize, 32768)
+
+    return self.round_to_n(collector_heapsize), self.round_to_n(hbase_heapsize), total_sinks_count
+
+
+  def round_to_n(self, mem_size, n=128):
+    return int(round(float(mem_size) / float(n))) * int(n)
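+  # Worked example of the sizing above: a cluster with 1 NAMENODE,
+  # 1 SECONDARY_NAMENODE and 3 DATANODEs contributes 50 + 50 + 3*10 = 130 MB,
+  # so hbase_heapsize = 500 + 130 = 630 -> round_to_n(630) = 640 (nearest 128),
+  # while collector_heapsize stays at the 512 floor because 630 <= 2048.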
+
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = AMBARI_METRICSRecommender()
+    recommender.recommendAmsConfigurationsFromHDP206(configurations, clusterData, services, hosts)
+
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = self.getAMBARI_METRICSValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+
+  def getAMBARI_METRICSValidator(self):
+    return AMBARI_METRICSValidator()
+
+class AMBARI_METRICSRecommender(service_advisor.ServiceAdvisor):
+  """
+  AMS Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AMBARI_METRICSRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+  def getPreferredMountPoints(self, hostInfo):
+
+    # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
+    undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
+                              "/etc/hostname", "/tmp"]
+    undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
+    mountPoints = []
+    if hostInfo and "disk_info" in hostInfo:
+      mountPointsDict = {}
+      for mountpoint in hostInfo["disk_info"]:
+        if not (mountpoint["mountpoint"] in undesirableMountPoints or
+                mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
+                mountpoint["type"] in undesirableFsTypes or
+                mountpoint["available"] == str(0)):
+          mountPointsDict[mountpoint["mountpoint"]] = self.to_number(mountpoint["available"])
+      if mountPointsDict:
+        mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
+    mountPoints.append("/")
+    return mountPoints
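+  # For illustration, a hypothetical host whose disk_info lists
+  # "/" (10 GB free), "/grid/0" (200 GB free) and "/tmp" (5 GB free)
+  # yields ["/grid/0", "/"]: "/" and "/tmp" are filtered as undesirable,
+  # the rest are sorted by free space, and "/" is appended as the fallback.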
+
+  def recommendAmsConfigurationsFromHDP206(self, configurations, clusterData, services, hosts):
+    putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
+    putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
+    putAmsSiteProperty = self.putProperty(configurations, "ams-site", services)
+    putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
+    putGrafanaProperty = self.putProperty(configurations, "ams-grafana-env", services)
+    putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, "ams-grafana-env")
+
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+
+    serviceAdvisor = AMBARI_METRICSServiceAdvisor()
+
+    # TODO set "timeline.metrics.service.webapp.address" to 0.0.0.0:port in upgrade catalog
+    timeline_metrics_service_webapp_address = '0.0.0.0'
+
+    putAmsSiteProperty("timeline.metrics.service.webapp.address", str(timeline_metrics_service_webapp_address) + ":6188")
+
+    log_dir = "/var/log/ambari-metrics-collector"
+    if "ams-env" in services["configurations"]:
+      if "metrics_collector_log_dir" in services["configurations"]["ams-env"]["properties"]:
+        log_dir = services["configurations"]["ams-env"]["properties"]["metrics_collector_log_dir"]
+      putHbaseEnvProperty("hbase_log_dir", log_dir)
+
+    defaultFs = 'file:///'
+    if "core-site" in services["configurations"] and \
+      "fs.defaultFS" in services["configurations"]["core-site"]["properties"]:
+      defaultFs = services["configurations"]["core-site"]["properties"]["fs.defaultFS"]
+
+    operatingMode = "embedded"
+    if "ams-site" in services["configurations"]:
+      if "timeline.metrics.service.operation.mode" in services["configurations"]["ams-site"]["properties"]:
+        operatingMode = services["configurations"]["ams-site"]["properties"]["timeline.metrics.service.operation.mode"]
+
+    if len(amsCollectorHosts) > 1:
+      operatingMode = "distributed"
+      putAmsSiteProperty("timeline.metrics.service.operation.mode", operatingMode)
+
+    if operatingMode == "distributed":
+      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'true')
+      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'true')
+    else:
+      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'false')
+      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'false')
+
+    rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
+    tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
+    zk_port_default = []
+    if "ams-hbase-site" in services["configurations"]:
+      if "hbase.rootdir" in services["configurations"]["ams-hbase-site"]["properties"]:
+        rootDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.rootdir"]
+      if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
+        tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
+      if "hbase.zookeeper.property.clientPort" in services["configurations"]["ams-hbase-site"]["properties"]:
+        zk_port_default = services["configurations"]["ams-hbase-site"]["properties"]["hbase.zookeeper.property.clientPort"]
+
+    # Skip the recommendation if the default placeholder value is present
+    if operatingMode == "distributed" and not "{{zookeeper_clientPort}}" in zk_port_default:
+      zkPort = self.getZKPort(services)
+      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", zkPort)
+    elif operatingMode == "embedded" and not "{{zookeeper_clientPort}}" in zk_port_default:
+      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", "61181")
+
+    mountpoints = ["/"]
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          mountpoints = self.getPreferredMountPoints(host["Hosts"])
+          break
+    isLocalRootDir = rootDir.startswith("file://") or (defaultFs.startswith("file://") and rootDir.startswith("/"))
+    if isLocalRootDir:
+      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
+      rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
+    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
+    if len(mountpoints) > 1 and isLocalRootDir:
+      tmpDir = os.path.join(mountpoints[1], tmpDir)
+    else:
+      tmpDir = os.path.join(mountpoints[0], tmpDir)
+    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
+
+    if operatingMode == "distributed":
+      putAmsHbaseSiteProperty("hbase.rootdir", "/user/ams/hbase")
+
+    if operatingMode == "embedded":
+      if isLocalRootDir:
+        putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
+      else:
+        putAmsHbaseSiteProperty("hbase.rootdir", "file:///var/lib/ambari-metrics-collector/hbase")
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = serviceAdvisor.getAmsMemoryRecommendation(services, hosts)
+
+    putAmsEnvProperty("metrics_collector_heapsize", collector_heapsize)
+
+    putAmsSiteProperty("timeline.metrics.cache.size", max(100, int(log(total_sinks_count)) * 100))
+    putAmsSiteProperty("timeline.metrics.cache.commit.interval", min(10, max(12 - int(log(total_sinks_count)), 2)))
+
+    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
+    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
+    putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
+    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
+
+    if len(amsCollectorHosts) > 1:
+      pass
+    else:
+      # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
+      if total_sinks_count >= 2000:
+        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
+        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
+        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
+        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
+        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
+        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
+        putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
+        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
+        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 10000)
+      elif total_sinks_count >= 1000:
+        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
+        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
+        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
+        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 40960000)
+        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 5000)
+      else:
+        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
+      pass
+
+    metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
+    putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
+
+    # Distributed mode heap size
+    if operatingMode == "distributed":
+      hbase_heapsize = max(hbase_heapsize, 1024)
+      putHbaseEnvProperty("hbase_master_heapsize", "512")
+      putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
+      putHbaseEnvProperty("regionserver_xmn_size", serviceAdvisor.round_to_n(0.15 * hbase_heapsize,64))
+    else:
+      # Embedded mode heap size : master + regionserver
+      hbase_rs_heapsize = 512
+      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
+      putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
+      putHbaseEnvProperty("hbase_master_xmn_size", serviceAdvisor.round_to_n(0.15*(hbase_heapsize + hbase_rs_heapsize),64))
+
+    # If no local DN in distributed mode
+    if operatingMode == "distributed":
+      dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
+      # call by Kerberos wizard sends only the service being affected
+      # so it is possible for dn_hosts to be None but not amsCollectorHosts
+      if dn_hosts and len(dn_hosts) > 0:
+        if set(amsCollectorHosts).intersection(dn_hosts):
+          collector_cohosted_with_dn = "true"
+        else:
+          collector_cohosted_with_dn = "false"
+        putAmsHbaseSiteProperty("dfs.client.read.shortcircuit", collector_cohosted_with_dn)
+
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+
+    ams_hbase_site = None
+    ams_hbase_env = None
+
+    # Properties overridden from the UI
+    if "ams-hbase-site" in services["configurations"]:
+      ams_hbase_site = services["configurations"]["ams-hbase-site"]["properties"]
+    if "ams-hbase-env" in services["configurations"]:
+      ams_hbase_env = services["configurations"]["ams-hbase-env"]["properties"]
+
+    # Recommendations
+    if not ams_hbase_site:
+      ams_hbase_site = configurations["ams-hbase-site"]["properties"]
+    if not ams_hbase_env:
+      ams_hbase_env = configurations["ams-hbase-env"]["properties"]
+
+    split_point_finder = FindSplitPointsForAMSRegions(
+      ams_hbase_site, ams_hbase_env, serviceMetricsDir, customServiceMetricsDir, operatingMode, servicesList)
+
+    result = split_point_finder.get_split_points()
+    precision_splits = ' '
+    aggregate_splits = ' '
+    if result.precision:
+      precision_splits = result.precision
+    if result.aggregate:
+      aggregate_splits = result.aggregate
+    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
+    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
+
+    component_grafana_exists = False
+    for service in services['services']:
+      if 'components' in service:
+        for component in service['components']:
+          if 'StackServiceComponents' in component:
+            # If Grafana is installed, a non-empty hostnames list indicates its location
+            if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\
+              len(component['StackServiceComponents']['hostnames']) != 0:
+              component_grafana_exists = True
+              break
+    pass
+
+    if not component_grafana_exists:
+      putGrafanaPropertyAttribute("metrics_grafana_password", "visible", "false")
+
+    pass
+
+
+
+class AMBARI_METRICSValidator(service_advisor.ServiceAdvisor):
+  """
+  AMS Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(AMBARI_METRICSValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
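+    # Each entry pairs a config type with its validation method; the parent's
+    # validateListOfConfigUsingMethod() dispatches to them with the signature
+    # method(siteProperties, siteRecommendations, configurations, services, hosts).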
+    self.validators = [("ams-hbase-site", self.validateAmsHbaseSiteConfigurationsFromHDP206),
+                       ("ams-hbase-env", self.validateAmsHbaseEnvConfigurationsFromHDP206),
+                       ("ams-site", self.validateAmsSiteConfigurationsFromHDP206),
+                       ("ams-env", self.validateAmsEnvConfigurationsFromHDP206),
+                       ("ams-grafana-env", self.validateGrafanaEnvConfigurationsFromHDP206)]
+
+  def getPreferredMountPoints(self, hostInfo):
+
+    # '/etc/resolv.conf', '/etc/hostname', '/etc/hosts' are docker specific mount points
+    undesirableMountPoints = ["/", "/home", "/etc/resolv.conf", "/etc/hosts",
+                              "/etc/hostname", "/tmp"]
+    undesirableFsTypes = ["devtmpfs", "tmpfs", "vboxsf", "CDFS"]
+    mountPoints = []
+    if hostInfo and "disk_info" in hostInfo:
+      mountPointsDict = {}
+      for mountpoint in hostInfo["disk_info"]:
+        if not (mountpoint["mountpoint"] in undesirableMountPoints or
+                mountpoint["mountpoint"].startswith(("/boot", "/mnt")) or
+                mountpoint["type"] in undesirableFsTypes or
+                mountpoint["available"] == str(0)):
+          mountPointsDict[mountpoint["mountpoint"]] = self.to_number(mountpoint["available"])
+      if mountPointsDict:
+        mountPoints = sorted(mountPointsDict, key=mountPointsDict.get, reverse=True)
+    mountPoints.append("/")
+    return mountPoints
+
+  def validateAmsHbaseSiteConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+    ams_site = self.getSiteProperties(configurations, "ams-site")
+    core_site = self.getSiteProperties(configurations, "core-site")
+
+    serviceAdvisor = AMBARI_METRICSServiceAdvisor()
+
+    collector_heapsize, hbase_heapsize, total_sinks_count = serviceAdvisor.getAmsMemoryRecommendation(services, hosts)
+    recommendedDiskSpace = 10485760  # * 1k == 10 Gb
+    # TODO validate configuration for multiple AMBARI_METRICS collectors
+    if len(amsCollectorHosts) > 1:
+      pass
+    else:
+      if total_sinks_count > 2000:
+        recommendedDiskSpace  = 104857600  # * 1k == 100 Gb
+      elif total_sinks_count > 500:
+        recommendedDiskSpace  = 52428800  # * 1k == 50 Gb
+      elif total_sinks_count > 250:
+        recommendedDiskSpace  = 20971520  # * 1k == 20 Gb
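+    # Threshold summary (values are in KB): <= 250 sinks -> 10 GB of free space
+    # for hbase.rootdir, > 250 -> 20 GB, > 500 -> 50 GB, > 2000 -> 100 GB.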
+
+    validationItems = []
+
+    rootdir_item = None
+    op_mode = ams_site.get("timeline.metrics.service.operation.mode")
+    default_fs = core_site.get("fs.defaultFS") if core_site else "file:///"
+    hbase_rootdir = properties.get("hbase.rootdir")
+    hbase_tmpdir = properties.get("hbase.tmp.dir")
+    distributed = properties.get("hbase.cluster.distributed")
+    is_local_root_dir = hbase_rootdir.startswith("file://") or (default_fs.startswith("file://") and hbase_rootdir.startswith("/"))
+
+    if op_mode == "distributed" and is_local_root_dir:
+      rootdir_item = self.getWarnItem("In distributed mode hbase.rootdir should point to HDFS.")
+    elif op_mode == "embedded":
+      if distributed.lower() == "false" and hbase_rootdir.startswith('/') or hbase_rootdir.startswith("hdfs://"):
+        rootdir_item = self.getWarnItem("In embedded mode hbase.rootdir cannot point to schemaless values or HDFS, "
+                                        "Example - file:// for localFS")
+      pass
+
+    distributed_item = None
+    if op_mode == "distributed" and not distributed.lower() == "true":
+      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to true for "
+                                           "distributed mode")
+    if op_mode == "embedded" and distributed.lower() == "true":
+      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to false for embedded mode")
+
+    hbase_zk_client_port = properties.get("hbase.zookeeper.property.clientPort")
+    zkPort = self.getZKPort(services)
+    hbase_zk_client_port_item = None
+    if distributed.lower() == "true" and op_mode == "distributed" and \
+        hbase_zk_client_port != zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
+      hbase_zk_client_port_item = self.getErrorItem("In AMS distributed mode, hbase.zookeeper.property.clientPort "
+                                                    "should be the cluster zookeeper server port : {0}".format(zkPort))
+
+    if distributed.lower() == "false" and op_mode == "embedded" and \
+        hbase_zk_client_port == zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
+      hbase_zk_client_port_item = self.getErrorItem("In AMS embedded mode, hbase.zookeeper.property.clientPort "
+                                                    "should be a different port than cluster zookeeper port."
+                                                    "(default:61181)")
+
+    validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
+                            {"config-name":'hbase.cluster.distributed', "item": distributed_item },
+                            {"config-name":'hbase.zookeeper.property.clientPort', "item": hbase_zk_client_port_item }])
+
+    for collectorHostName in amsCollectorHosts:
+      for host in hosts["items"]:
+        if host["Hosts"]["host_name"] == collectorHostName:
+          if op_mode == 'embedded' or is_local_root_dir:
+            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
+            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host["Hosts"])}])
+            validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host["Hosts"])}])
+
+          dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
+          if is_local_root_dir:
+            mountPoints = []
+            for mountPoint in host["Hosts"]["disk_info"]:
+              mountPoints.append(mountPoint["mountpoint"])
+            hbase_rootdir_mountpoint = self.getMountPointForDir(hbase_rootdir, mountPoints)
+            hbase_tmpdir_mountpoint = self.getMountPointForDir(hbase_tmpdir, mountPoints)
+            preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])
+            # hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition
+            # if multiple preferred_mountpoints exist
+            if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \
+              len(preferred_mountpoints) > 1:
+              item = self.getWarnItem("Consider not using {0} partition for storing metrics temporary data. "
+                                      "{0} partition is already used as hbase.rootdir to store metrics data".format(hbase_tmpdir_mountpoint))
+              validationItems.extend([{"config-name":'hbase.tmp.dir', "item": item}])
+
+            # if METRICS_COLLECTOR is co-hosted with DATANODE
+            # cross-check dfs.datanode.data.dir and hbase.rootdir
+            # they shouldn't share same disk partition IO
+            hdfs_site = self.getSiteProperties(configurations, "hdfs-site")
+            dfs_datadirs = hdfs_site.get("dfs.datanode.data.dir").split(",") if hdfs_site and "dfs.datanode.data.dir" in hdfs_site else []
+            if dn_hosts and collectorHostName in dn_hosts and ams_site and \
+              dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):
+              for dfs_datadir in dfs_datadirs:
+                dfs_datadir_mountpoint = self.getMountPointForDir(dfs_datadir, mountPoints)
+                if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
+                  item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
+                                          "{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
+                  validationItems.extend([{"config-name": 'hbase.rootdir', "item": item}])
+                  break
+          # If no local DN in distributed mode
+          elif collectorHostName not in dn_hosts and distributed.lower() == "true":
+            item = self.getWarnItem("It's recommended to install Datanode component on {0} "
+                                    "to speed up IO operations between HDFS and Metrics "
+                                    "Collector in distributed mode ".format(collectorHostName))
+            validationItems.extend([{"config-name": "hbase.cluster.distributed", "item": item}])
+          # Short-circuit read should be enabled in distributed mode
+          # if local DN installed
+          else:
+            validationItems.extend([{"config-name": "dfs.client.read.shortcircuit", "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "dfs.client.read.shortcircuit")}])
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
+
+
+  def validateAmsHbaseEnvConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    ams_env = self.getSiteProperties(configurations, "ams-env")
+    amsHbaseSite = self.getSiteProperties(configurations, "ams-hbase-site")
+    validationItems = []
+    mb = 1024 * 1024
+    gb = 1024 * mb
+
+    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
+    if regionServerItem:
+      validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
+
+    hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
+    if hbaseMasterHeapsizeItem:
+      validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+
+    logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir", ams_env, "metrics_collector_log_dir")
+    if logDirItem:
+      validationItems.extend([{"config-name": "hbase_log_dir", "item": logDirItem}])
+
+    hbase_master_heapsize = self.to_number(properties["hbase_master_heapsize"])
+    hbase_master_xmn_size = self.to_number(properties["hbase_master_xmn_size"])
+    hbase_regionserver_heapsize = self.to_number(properties["hbase_regionserver_heapsize"])
+    hbase_regionserver_xmn_size = self.to_number(properties["regionserver_xmn_size"])
+
+    # Validate Xmn settings.
+    masterXmnItem = None
+    regionServerXmnItem = None
+    is_hbase_distributed = amsHbaseSite.get("hbase.cluster.distributed").lower() == 'true'
+
+    if is_hbase_distributed:
+
+      if not regionServerItem and hbase_regionserver_heapsize > 32768:
+        regionServerItem = self.getWarnItem("Value is more than the recommended maximum heap size of 32G.")
+        validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
+
+      minMasterXmn = 0.12 * hbase_master_heapsize
+      maxMasterXmn = 0.2 * hbase_master_heapsize
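+      # e.g. hbase_master_heapsize = 1024 -> the accepted Xmn window is
+      # [ceil(122.88), floor(204.8)] = [123, 204] MB before a warning is raised.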
+      if hbase_master_xmn_size < minMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                         "(12% of hbase_master_heapsize)".format(int(ceil(minMasterXmn))))
+
+      if hbase_master_xmn_size > maxMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                         "(20% of hbase_master_heapsize)".format(int(floor(maxMasterXmn))))
+
+      minRegionServerXmn = 0.12 * hbase_regionserver_heapsize
+      maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize
+      if hbase_regionserver_xmn_size < minRegionServerXmn:
+        regionServerXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                               "(12% of hbase_regionserver_heapsize)"
+                                               .format(int(ceil(minRegionServerXmn))))
+
+      if hbase_regionserver_xmn_size > maxRegionServerXmn:
+        regionServerXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                               "(20% of hbase_regionserver_heapsize)"
+                                               .format(int(floor(maxRegionServerXmn))))
+    else:
+
+      if not hbaseMasterHeapsizeItem and (hbase_master_heapsize + hbase_regionserver_heapsize) > 32768:
+        hbaseMasterHeapsizeItem = self.getWarnItem("Value of Master + Regionserver heapsize is more than the recommended maximum heap size of 32G.")
+        validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+
+      minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)
+      maxMasterXmn = 0.2 *  (hbase_master_heapsize + hbase_regionserver_heapsize)
+      if hbase_master_xmn_size < minMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
+                                         "(12% of hbase_master_heapsize + hbase_regionserver_heapsize)"
+                                         .format(int(ceil(minMasterXmn))))
+
+      if hbase_master_xmn_size > maxMasterXmn:
+        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
+                                         "(20% of hbase_master_heapsize + hbase_regionserver_heapsize)"
+                                         .format(int(floor(maxMasterXmn))))
+    if masterXmnItem:
+      validationItems.extend([{"config-name": "hbase_master_xmn_size", "item": masterXmnItem}])
+
+    if regionServerXmnItem:
+      validationItems.extend([{"config-name": "regionserver_xmn_size", "item": regionServerXmnItem}])
+
+    if hbaseMasterHeapsizeItem is None:
+      hostMasterComponents = {}
+
+      for service in services["services"]:
+        for component in service["components"]:
+          if component["StackServiceComponents"]["hostnames"] is not None:
+            for hostName in component["StackServiceComponents"]["hostnames"]:
+              if self.isMasterComponent(component):
+                if hostName not in hostMasterComponents.keys():
+                  hostMasterComponents[hostName] = []
+                hostMasterComponents[hostName].append(component["StackServiceComponents"]["component_name"])
+
+      amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
+      for collectorHostName in amsCollectorHosts:
+        for host in hosts["items"]:
+          if host["Hosts"]["host_name"] == collectorHostName:
+            # AMS Collector co-hosted with other master components in bigger clusters
+            if len(hosts['items']) > 31 and \
+                            len(hostMasterComponents[collectorHostName]) > 2 and \
+                            host["Hosts"]["total_mem"] < 32*mb: # < 32Gb(total_mem in k)
+              masterHostMessage = "Host {0} is used by multiple master components ({1}). " \
+                                  "It is recommended to use a separate host for the " \
+                                  "Ambari Metrics Collector component and ensure " \
+                                  "the host has sufficient memory available."
+
+              hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(
+                  collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
+              if hbaseMasterHeapsizeItem:
+                validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
+      pass
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
+
+
+  def validateAmsSiteConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+
+    serviceAdvisor = AMBARI_METRICSServiceAdvisor()
+
+    op_mode = properties.get("timeline.metrics.service.operation.mode")
+    correct_op_mode_item = None
+    if op_mode not in ("embedded", "distributed"):
+      correct_op_mode_item = self.getErrorItem("Correct value should be set.")
+      pass
+    elif len(self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")) > 1 and op_mode != 'distributed':
+      correct_op_mode_item = self.getErrorItem("Correct value should be 'distributed' for clusters with more then 1 Metrics collector")
+    elif op_mode == 'embedded':
+      collector_heapsize, hbase_heapsize, total_sinks_count = serviceAdvisor.getAmsMemoryRecommendation(services, hosts)
+      if total_sinks_count > 1000:
+        correct_op_mode_item = self.getWarnItem("Number of sinks writing metrics to collector is expected to be more than 1000. "
+                                                "'Embedded' mode AMS might not be able to handle the load. Consider moving to distributed mode.")
+
+    validationItems.extend([{"config-name":'timeline.metrics.service.operation.mode', "item": correct_op_mode_item }])
+    return self.toConfigurationValidationProblems(validationItems, "ams-site")
+
+
+  def validateAmsEnvConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+
+    validationItems = []
+    collectorHeapsizeDefaultItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "metrics_collector_heapsize")
+    validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeDefaultItem}])
+
+    ams_env = self.getSiteProperties(configurations, "ams-env")
+    collector_heapsize = self.to_number(ams_env.get("metrics_collector_heapsize"))
+    if collector_heapsize > 32768:
+      collectorHeapsizeMaxItem = self.getWarnItem("Value is more than the recommended maximum heap size of 32G.")
+      validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeMaxItem}])
+
+    return self.toConfigurationValidationProblems(validationItems, "ams-env")
+
+
+  def validateGrafanaEnvConfigurationsFromHDP206(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+
+    grafana_pwd = properties.get("metrics_grafana_password")
+    grafana_pwd_length_item = None
+    if len(grafana_pwd) < 4:
+      grafana_pwd_length_item = self.getErrorItem("Grafana password length should be at least 4 characters.")
+      pass
+    validationItems.extend([{"config-name":'metrics_grafana_password', "item": grafana_pwd_length_item }])
+    return self.toConfigurationValidationProblems(validationItems, "ams-site")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
index a2e31cc..058e086 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
@@ -74,7 +74,10 @@ class AtlasServiceAdvisor(service_advisor.ServiceAdvisor):
     Modify the dictionary of heap size properties.
     Must be overridden in child class.
     """
-    pass
+    self.heap_size_properties = {"ATLAS_SERVER":
+                                   [{"config-name": "atlas-env",
+                                     "property": "atlas_server_xmx",
+                                     "default": "2048m"}]}
 
   def modifyNotValuableComponents(self):
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
index 5307176..a194332 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/stack_advisor.py
@@ -172,7 +172,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "HDFS": self.recommendHDFSConfigurations,
       "HBASE": self.recommendHbaseConfigurations,
       "STORM": self.recommendStormConfigurations,
-      "AMBARI_METRICS": self.recommendAmsConfigurations,
       "RANGER": self.recommendRangerConfigurations,
       "ZOOKEEPER": self.recommendZookeeperConfigurations,
       "OOZIE": self.recommendOozieConfigurations
@@ -509,66 +508,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
                 putRangerAuditProperty(item['target_configname'], rangerAuditProperty)
 
 
-  def getAmsMemoryRecommendation(self, services, hosts):
-    # MB per sink in hbase heapsize
-    HEAP_PER_MASTER_COMPONENT = 50
-    HEAP_PER_SLAVE_COMPONENT = 10
-
-    schMemoryMap = {
-      "HDFS": {
-        "NAMENODE": HEAP_PER_MASTER_COMPONENT,
-        "SECONDARY_NAMENODE": HEAP_PER_MASTER_COMPONENT,
-        "DATANODE": HEAP_PER_SLAVE_COMPONENT
-      },
-      "YARN": {
-        "RESOURCEMANAGER": HEAP_PER_MASTER_COMPONENT,
-        "NODEMANAGER": HEAP_PER_SLAVE_COMPONENT,
-        "HISTORYSERVER" : HEAP_PER_MASTER_COMPONENT,
-        "APP_TIMELINE_SERVER": HEAP_PER_MASTER_COMPONENT
-      },
-      "HBASE": {
-        "HBASE_MASTER": HEAP_PER_MASTER_COMPONENT,
-        "HBASE_REGIONSERVER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "HIVE": {
-        "HIVE_METASTORE": HEAP_PER_MASTER_COMPONENT,
-        "HIVE_SERVER": HEAP_PER_MASTER_COMPONENT
-      },
-      "KAFKA": {
-        "KAFKA_BROKER": HEAP_PER_MASTER_COMPONENT
-      },
-      "FLUME": {
-        "FLUME_HANDLER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "STORM": {
-        "NIMBUS": HEAP_PER_MASTER_COMPONENT,
-      },
-      "AMBARI_METRICS": {
-        "METRICS_COLLECTOR": HEAP_PER_MASTER_COMPONENT,
-        "METRICS_MONITOR": HEAP_PER_SLAVE_COMPONENT
-      },
-      "ACCUMULO": {
-        "ACCUMULO_MASTER": HEAP_PER_MASTER_COMPONENT,
-        "ACCUMULO_TSERVER": HEAP_PER_SLAVE_COMPONENT
-      },
-      "LOGSEARCH": {
-        "LOGSEARCH_LOGFEEDER" : HEAP_PER_SLAVE_COMPONENT
-      }
-    }
-    total_sinks_count = 0
-    # minimum heap size
-    hbase_heapsize = 500
-    for serviceName, componentsDict in schMemoryMap.items():
-      for componentName, multiplier in componentsDict.items():
-        schCount = len(
-          self.getHostsWithComponent(serviceName, componentName, services,
-                                     hosts))
-        hbase_heapsize += int((schCount * multiplier))
-        total_sinks_count += schCount
-    collector_heapsize = int(hbase_heapsize/3 if hbase_heapsize > 2048 else 512)
-    hbase_heapsize = min(hbase_heapsize, 32768)
-
-    return round_to_n(collector_heapsize), round_to_n(hbase_heapsize), total_sinks_count
 
   def recommendStormConfigurations(self, configurations, clusterData, services, hosts):
     putStormSiteProperty = self.putProperty(configurations, "storm-site", services)
@@ -577,216 +516,6 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
     if 'AMBARI_METRICS' in servicesList:
       putStormSiteProperty('metrics.reporter.register', 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter')
 
-  def recommendAmsConfigurations(self, configurations, clusterData, services, hosts):
-    putAmsEnvProperty = self.putProperty(configurations, "ams-env", services)
-    putAmsHbaseSiteProperty = self.putProperty(configurations, "ams-hbase-site", services)
-    putAmsSiteProperty = self.putProperty(configurations, "ams-site", services)
-    putHbaseEnvProperty = self.putProperty(configurations, "ams-hbase-env", services)
-    putGrafanaProperty = self.putProperty(configurations, "ams-grafana-env", services)
-    putGrafanaPropertyAttribute = self.putPropertyAttribute(configurations, "ams-grafana-env")
-
-    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-
-    # TODO set "timeline.metrics.service.webapp.address" to 0.0.0.0:port in upgrade catalog
-    timeline_metrics_service_webapp_address = '0.0.0.0'
-
-    putAmsSiteProperty("timeline.metrics.service.webapp.address", str(timeline_metrics_service_webapp_address) + ":6188")
-
-    log_dir = "/var/log/ambari-metrics-collector"
-    if "ams-env" in services["configurations"]:
-      if "metrics_collector_log_dir" in services["configurations"]["ams-env"]["properties"]:
-        log_dir = services["configurations"]["ams-env"]["properties"]["metrics_collector_log_dir"]
-      putHbaseEnvProperty("hbase_log_dir", log_dir)
-
-    defaultFs = 'file:///'
-    if "core-site" in services["configurations"] and \
-      "fs.defaultFS" in services["configurations"]["core-site"]["properties"]:
-      defaultFs = services["configurations"]["core-site"]["properties"]["fs.defaultFS"]
-
-    operatingMode = "embedded"
-    if "ams-site" in services["configurations"]:
-      if "timeline.metrics.service.operation.mode" in services["configurations"]["ams-site"]["properties"]:
-        operatingMode = services["configurations"]["ams-site"]["properties"]["timeline.metrics.service.operation.mode"]
-
-    if len(amsCollectorHosts) > 1 :
-      operatingMode = "distributed"
-      putAmsSiteProperty("timeline.metrics.service.operation.mode", operatingMode)
-
-    if operatingMode == "distributed":
-      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'true')
-      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'true')
-    else:
-      putAmsSiteProperty("timeline.metrics.service.watcher.disabled", 'false')
-      putAmsHbaseSiteProperty("hbase.cluster.distributed", 'false')
-
-    rootDir = "file:///var/lib/ambari-metrics-collector/hbase"
-    tmpDir = "/var/lib/ambari-metrics-collector/hbase-tmp"
-    zk_port_default = []
-    if "ams-hbase-site" in services["configurations"]:
-      if "hbase.rootdir" in services["configurations"]["ams-hbase-site"]["properties"]:
-        rootDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.rootdir"]
-      if "hbase.tmp.dir" in services["configurations"]["ams-hbase-site"]["properties"]:
-        tmpDir = services["configurations"]["ams-hbase-site"]["properties"]["hbase.tmp.dir"]
-      if "hbase.zookeeper.property.clientPort" in services["configurations"]["ams-hbase-site"]["properties"]:
-        zk_port_default = services["configurations"]["ams-hbase-site"]["properties"]["hbase.zookeeper.property.clientPort"]
-
-      # Skip recommendation item if default value is present
-    if operatingMode == "distributed" and not "{{zookeeper_clientPort}}" in zk_port_default:
-      zkPort = self.getZKPort(services)
-      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", zkPort)
-    elif operatingMode == "embedded" and not "{{zookeeper_clientPort}}" in zk_port_default:
-      putAmsHbaseSiteProperty("hbase.zookeeper.property.clientPort", "61181")
-
-    mountpoints = ["/"]
-    for collectorHostName in amsCollectorHosts:
-      for host in hosts["items"]:
-        if host["Hosts"]["host_name"] == collectorHostName:
-          mountpoints = self.getPreferredMountPoints(host["Hosts"])
-          break
-    isLocalRootDir = rootDir.startswith("file://") or (defaultFs.startswith("file://") and rootDir.startswith("/"))
-    if isLocalRootDir:
-      rootDir = re.sub("^file:///|/", "", rootDir, count=1)
-      rootDir = "file://" + os.path.join(mountpoints[0], rootDir)
-    tmpDir = re.sub("^file:///|/", "", tmpDir, count=1)
-    if len(mountpoints) > 1 and isLocalRootDir:
-      tmpDir = os.path.join(mountpoints[1], tmpDir)
-    else:
-      tmpDir = os.path.join(mountpoints[0], tmpDir)
-    putAmsHbaseSiteProperty("hbase.tmp.dir", tmpDir)
-
-    if operatingMode == "distributed":
-      putAmsHbaseSiteProperty("hbase.rootdir", "/user/ams/hbase")
-
-    if operatingMode == "embedded":
-      if isLocalRootDir:
-        putAmsHbaseSiteProperty("hbase.rootdir", rootDir)
-      else:
-        putAmsHbaseSiteProperty("hbase.rootdir", "file:///var/lib/ambari-metrics-collector/hbase")
-
-    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
-
-    putAmsEnvProperty("metrics_collector_heapsize", collector_heapsize)
-
-    putAmsSiteProperty("timeline.metrics.cache.size", max(100, int(log(total_sinks_count)) * 100))
-    putAmsSiteProperty("timeline.metrics.cache.commit.interval", min(10, max(12 - int(log(total_sinks_count)), 2)))
-
-    # blockCache = 0.3, memstore = 0.35, phoenix-server = 0.15, phoenix-client = 0.25
-    putAmsHbaseSiteProperty("hfile.block.cache.size", 0.3)
-    putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 134217728)
-    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.35)
-    putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.3)
-
-    if len(amsCollectorHosts) > 1:
-      pass
-    else:
-      # blockCache = 0.3, memstore = 0.3, phoenix-server = 0.2, phoenix-client = 0.3
-      if total_sinks_count >= 2000:
-        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
-        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
-        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
-        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
-        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.upperLimit", 0.3)
-        putAmsHbaseSiteProperty("hbase.regionserver.global.memstore.lowerLimit", 0.25)
-        putAmsHbaseSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 20)
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 81920000)
-        putAmsSiteProperty("phoenix.query.maxGlobalMemoryPercentage", 30)
-        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 10000)
-      elif total_sinks_count >= 1000:
-        putAmsHbaseSiteProperty("hbase.regionserver.handler.count", 60)
-        putAmsHbaseSiteProperty("hbase.regionserver.hlog.blocksize", 134217728)
-        putAmsHbaseSiteProperty("hbase.regionserver.maxlogs", 64)
-        putAmsHbaseSiteProperty("hbase.hregion.memstore.flush.size", 268435456)
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 40960000)
-        putAmsSiteProperty("timeline.metrics.service.resultset.fetchSize", 5000)
-      else:
-        putAmsHbaseSiteProperty("phoenix.coprocessor.maxMetaDataCacheSize", 20480000)
-      pass
-
-    metrics_api_handlers = min(50, max(20, int(total_sinks_count / 100)))
-    putAmsSiteProperty("timeline.metrics.service.handler.thread.count", metrics_api_handlers)
-
-    # Distributed mode heap size
-    if operatingMode == "distributed":
-      hbase_heapsize = max(hbase_heapsize, 1024)
-      putHbaseEnvProperty("hbase_master_heapsize", "512")
-      putHbaseEnvProperty("hbase_master_xmn_size", "102") #20% of 512 heap size
-      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_heapsize)
-      putHbaseEnvProperty("regionserver_xmn_size", round_to_n(0.15 * hbase_heapsize,64))
-    else:
-      # Embedded mode heap size : master + regionserver
-      hbase_rs_heapsize = 512
-      putHbaseEnvProperty("hbase_regionserver_heapsize", hbase_rs_heapsize)
-      putHbaseEnvProperty("hbase_master_heapsize", hbase_heapsize)
-      putHbaseEnvProperty("hbase_master_xmn_size", round_to_n(0.15*(hbase_heapsize + hbase_rs_heapsize),64))
-
-    # If no local DN in distributed mode
-    if operatingMode == "distributed":
-      dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
-      # call by Kerberos wizard sends only the service being affected
-      # so it is possible for dn_hosts to be None but not amsCollectorHosts
-      if dn_hosts and len(dn_hosts) > 0:
-        if set(amsCollectorHosts).intersection(dn_hosts):
-          collector_cohosted_with_dn = "true"
-        else:
-          collector_cohosted_with_dn = "false"
-        putAmsHbaseSiteProperty("dfs.client.read.shortcircuit", collector_cohosted_with_dn)
-
-    #split points
-    scriptDir = os.path.dirname(os.path.abspath(__file__))
-    metricsDir = os.path.join(scriptDir, '../../../../common-services/AMBARI_METRICS/0.1.0/package')
-    serviceMetricsDir = os.path.join(metricsDir, 'files', 'service-metrics')
-    customServiceMetricsDir = os.path.join(scriptDir, '../../../../dashboards/service-metrics')
-    sys.path.append(os.path.join(metricsDir, 'scripts'))
-    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
-
-    from split_points import FindSplitPointsForAMSRegions
-
-    ams_hbase_site = None
-    ams_hbase_env = None
-
-    # Overriden properties form the UI
-    if "ams-hbase-site" in services["configurations"]:
-      ams_hbase_site = services["configurations"]["ams-hbase-site"]["properties"]
-    if "ams-hbase-env" in services["configurations"]:
-       ams_hbase_env = services["configurations"]["ams-hbase-env"]["properties"]
-
-    # Recommendations
-    if not ams_hbase_site:
-      ams_hbase_site = configurations["ams-hbase-site"]["properties"]
-    if not ams_hbase_env:
-      ams_hbase_env = configurations["ams-hbase-env"]["properties"]
-
-    split_point_finder = FindSplitPointsForAMSRegions(
-      ams_hbase_site, ams_hbase_env, serviceMetricsDir, customServiceMetricsDir, operatingMode, servicesList)
-
-    result = split_point_finder.get_split_points()
-    precision_splits = ' '
-    aggregate_splits = ' '
-    if result.precision:
-      precision_splits = result.precision
-    if result.aggregate:
-      aggregate_splits = result.aggregate
-    putAmsSiteProperty("timeline.metrics.host.aggregate.splitpoints", ','.join(precision_splits))
-    putAmsSiteProperty("timeline.metrics.cluster.aggregate.splitpoints", ','.join(aggregate_splits))
-
-    component_grafana_exists = False
-    for service in services['services']:
-      if 'components' in service:
-        for component in service['components']:
-          if 'StackServiceComponents' in component:
-            # If Grafana is installed the hostnames would indicate its location
-            if 'METRICS_GRAFANA' in component['StackServiceComponents']['component_name'] and\
-              len(component['StackServiceComponents']['hostnames']) != 0:
-              component_grafana_exists = True
-              break
-    pass
-
-    if not component_grafana_exists:
-      putGrafanaPropertyAttribute("metrics_grafana_password", "visible", "false")
-
-    pass
-
-
 
   def getServiceConfigurationValidators(self):
     return {
@@ -797,12 +526,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       "YARN": {"yarn-site": self.validateYARNConfigurations,
                "yarn-env": self.validateYARNEnvConfigurations},
       "HBASE": {"hbase-env": self.validateHbaseEnvConfigurations},
-      "STORM": {"storm-site": self.validateStormConfigurations},
-      "AMBARI_METRICS": {"ams-hbase-site": self.validateAmsHbaseSiteConfigurations,
-              "ams-hbase-env": self.validateAmsHbaseEnvConfigurations,
-              "ams-site": self.validateAmsSiteConfigurations,
-              "ams-env": self.validateAmsEnvConfigurations,
-               "ams-grafana-env": self.validateGrafanaEnvConfigurations}
+      "STORM": {"storm-site": self.validateStormConfigurations}
     }
 
   def validateMinMax(self, items, recommendedDefaults, configurations):
@@ -834,148 +558,7 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
       items.extend(self.toConfigurationValidationProblems(validationItems, configName))
     pass
 
-  def validateAmsSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = []
 
-    op_mode = properties.get("timeline.metrics.service.operation.mode")
-    correct_op_mode_item = None
-    if op_mode not in ("embedded", "distributed"):
-      correct_op_mode_item = self.getErrorItem("Correct value should be set.")
-      pass
-    elif len(self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")) > 1 and op_mode != 'distributed':
-      correct_op_mode_item = self.getErrorItem("Correct value should be 'distributed' for clusters with more then 1 Metrics collector")
-    elif op_mode == 'embedded':
-      collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
-      if total_sinks_count > 1000:
-        correct_op_mode_item = self.getWarnItem("Number of sinks writing metrics to collector is expected to be more than 1000. "
-                                                "'Embedded' mode AMS might not be able to handle the load. Consider moving to distributed mode.")
-
-    validationItems.extend([{"config-name":'timeline.metrics.service.operation.mode', "item": correct_op_mode_item }])
-    return self.toConfigurationValidationProblems(validationItems, "ams-site")
-
-  def validateGrafanaEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-    validationItems = []
-
-    grafana_pwd = properties.get("metrics_grafana_password")
-    grafana_pwd_length_item = None
-    if len(grafana_pwd) < 4:
-      grafana_pwd_length_item = self.getErrorItem("Grafana password length should be at least 4.")
-      pass
-    validationItems.extend([{"config-name":'metrics_grafana_password', "item": grafana_pwd_length_item }])
-    return self.toConfigurationValidationProblems(validationItems, "ams-site")
-
-  def validateAmsHbaseSiteConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-
-    amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-    ams_site = getSiteProperties(configurations, "ams-site")
-    core_site = getSiteProperties(configurations, "core-site")
-
-    collector_heapsize, hbase_heapsize, total_sinks_count = self.getAmsMemoryRecommendation(services, hosts)
-    recommendedDiskSpace = 10485760
-    # TODO validate configuration for multiple AMBARI_METRICS collectors
-    if len(amsCollectorHosts) > 1:
-      pass
-    else:
-      if total_sinks_count > 2000:
-        recommendedDiskSpace  = 104857600  # * 1k == 100 Gb
-      elif total_sinks_count > 500:
-        recommendedDiskSpace  = 52428800  # * 1k == 50 Gb
-      elif total_sinks_count > 250:
-        recommendedDiskSpace  = 20971520  # * 1k == 20 Gb
-
-    validationItems = []
-
-    rootdir_item = None
-    op_mode = ams_site.get("timeline.metrics.service.operation.mode")
-    default_fs = core_site.get("fs.defaultFS") if core_site else "file:///"
-    hbase_rootdir = properties.get("hbase.rootdir")
-    hbase_tmpdir = properties.get("hbase.tmp.dir")
-    distributed = properties.get("hbase.cluster.distributed")
-    is_local_root_dir = hbase_rootdir.startswith("file://") or (default_fs.startswith("file://") and hbase_rootdir.startswith("/"))
-
-    if op_mode == "distributed" and is_local_root_dir:
-      rootdir_item = self.getWarnItem("In distributed mode hbase.rootdir should point to HDFS.")
-    elif op_mode == "embedded":
-      if distributed.lower() == "false" and hbase_rootdir.startswith('/') or hbase_rootdir.startswith("hdfs://"):
-        rootdir_item = self.getWarnItem("In embedded mode hbase.rootdir cannot point to schemaless values or HDFS, "
-                                        "Example - file:// for localFS")
-      pass
-
-    distributed_item = None
-    if op_mode == "distributed" and not distributed.lower() == "true":
-      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to true for "
-                                           "distributed mode")
-    if op_mode == "embedded" and distributed.lower() == "true":
-      distributed_item = self.getErrorItem("hbase.cluster.distributed property should be set to false for embedded mode")
-
-    hbase_zk_client_port = properties.get("hbase.zookeeper.property.clientPort")
-    zkPort = self.getZKPort(services)
-    hbase_zk_client_port_item = None
-    if distributed.lower() == "true" and op_mode == "distributed" and \
-        hbase_zk_client_port != zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
-      hbase_zk_client_port_item = self.getErrorItem("In AMS distributed mode, hbase.zookeeper.property.clientPort "
-                                                    "should be the cluster zookeeper server port : {0}".format(zkPort))
-
-    if distributed.lower() == "false" and op_mode == "embedded" and \
-        hbase_zk_client_port == zkPort and hbase_zk_client_port != "{{zookeeper_clientPort}}":
-      hbase_zk_client_port_item = self.getErrorItem("In AMS embedded mode, hbase.zookeeper.property.clientPort "
-                                                    "should be a different port than cluster zookeeper port."
-                                                    "(default:61181)")
-
-    validationItems.extend([{"config-name":'hbase.rootdir', "item": rootdir_item },
-                            {"config-name":'hbase.cluster.distributed', "item": distributed_item },
-                            {"config-name":'hbase.zookeeper.property.clientPort', "item": hbase_zk_client_port_item }])
-
-    for collectorHostName in amsCollectorHosts:
-      for host in hosts["items"]:
-        if host["Hosts"]["host_name"] == collectorHostName:
-          if op_mode == 'embedded' or is_local_root_dir:
-            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorEnoughDiskSpace(properties, 'hbase.rootdir', host["Hosts"], recommendedDiskSpace)}])
-            validationItems.extend([{"config-name": 'hbase.rootdir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.rootdir', host["Hosts"])}])
-            validationItems.extend([{"config-name": 'hbase.tmp.dir', "item": self.validatorNotRootFs(properties, recommendedDefaults, 'hbase.tmp.dir', host["Hosts"])}])
-
-          dn_hosts = self.getComponentHostNames(services, "HDFS", "DATANODE")
-          if is_local_root_dir:
-            mountPoints = []
-            for mountPoint in host["Hosts"]["disk_info"]:
-              mountPoints.append(mountPoint["mountpoint"])
-            hbase_rootdir_mountpoint = self.getMountPointForDir(hbase_rootdir, mountPoints)
-            hbase_tmpdir_mountpoint = self.getMountPointForDir(hbase_tmpdir, mountPoints)
-            preferred_mountpoints = self.getPreferredMountPoints(host['Hosts'])
-            # hbase.rootdir and hbase.tmp.dir shouldn't point to the same partition
-            # if multiple preferred_mountpoints exist
-            if hbase_rootdir_mountpoint == hbase_tmpdir_mountpoint and \
-              len(preferred_mountpoints) > 1:
-              item = self.getWarnItem("Consider not using {0} partition for storing metrics temporary data. "
-                                      "{0} partition is already used as hbase.rootdir to store metrics data".format(hbase_tmpdir_mountpoint))
-              validationItems.extend([{"config-name":'hbase.tmp.dir', "item": item}])
-
-            # if METRICS_COLLECTOR is co-hosted with DATANODE
-            # cross-check dfs.datanode.data.dir and hbase.rootdir
-            # they shouldn't share same disk partition IO
-            hdfs_site = getSiteProperties(configurations, "hdfs-site")
-            dfs_datadirs = hdfs_site.get("dfs.datanode.data.dir").split(",") if hdfs_site and "dfs.datanode.data.dir" in hdfs_site else []
-            if dn_hosts and collectorHostName in dn_hosts and ams_site and \
-              dfs_datadirs and len(preferred_mountpoints) > len(dfs_datadirs):
-              for dfs_datadir in dfs_datadirs:
-                dfs_datadir_mountpoint = self.getMountPointForDir(dfs_datadir, mountPoints)
-                if dfs_datadir_mountpoint == hbase_rootdir_mountpoint:
-                  item = self.getWarnItem("Consider not using {0} partition for storing metrics data. "
-                                          "{0} is already used by datanode to store HDFS data".format(hbase_rootdir_mountpoint))
-                  validationItems.extend([{"config-name": 'hbase.rootdir', "item": item}])
-                  break
-          # If no local DN in distributed mode
-          elif collectorHostName not in dn_hosts and distributed.lower() == "true":
-            item = self.getWarnItem("It's recommended to install Datanode component on {0} "
-                                    "to speed up IO operations between HDFS and Metrics "
-                                    "Collector in distributed mode ".format(collectorHostName))
-            validationItems.extend([{"config-name": "hbase.cluster.distributed", "item": item}])
-          # Short circuit read should be enabled in distributed mode
-          # if local DN installed
-          else:
-            validationItems.extend([{"config-name": "dfs.client.read.shortcircuit", "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "dfs.client.read.shortcircuit")}])
-
-    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-site")
 
   def validateStormConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
     validationItems = []
@@ -990,132 +573,9 @@ class HDP206StackAdvisor(DefaultStackAdvisor):
 
     return self.toConfigurationValidationProblems(validationItems, "storm-site")
 
-  def validateAmsHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-
-    ams_env = getSiteProperties(configurations, "ams-env")
-    amsHbaseSite = getSiteProperties(configurations, "ams-hbase-site")
-    validationItems = []
-    mb = 1024 * 1024
-    gb = 1024 * mb
-
-    regionServerItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_regionserver_heapsize") ## FIXME if new service added
-    if regionServerItem:
-      validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
-
-    hbaseMasterHeapsizeItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "hbase_master_heapsize")
-    if hbaseMasterHeapsizeItem:
-      validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
-
-    logDirItem = self.validatorEqualsPropertyItem(properties, "hbase_log_dir", ams_env, "metrics_collector_log_dir")
-    if logDirItem:
-      validationItems.extend([{"config-name": "hbase_log_dir", "item": logDirItem}])
-
-    hbase_master_heapsize = self.to_number(properties["hbase_master_heapsize"])
-    hbase_master_xmn_size = self.to_number(properties["hbase_master_xmn_size"])
-    hbase_regionserver_heapsize = self.to_number(properties["hbase_regionserver_heapsize"])
-    hbase_regionserver_xmn_size = self.to_number(properties["regionserver_xmn_size"])
-
-    # Validate Xmn settings.
-    masterXmnItem = None
-    regionServerXmnItem = None
-    is_hbase_distributed = amsHbaseSite.get("hbase.cluster.distributed").lower() == 'true'
-
-    if is_hbase_distributed:
-
-      if not regionServerItem and hbase_regionserver_heapsize > 32768:
-        regionServerItem = self.getWarnItem("Value is more than the recommended maximum heap size of 32G.")
-        validationItems.extend([{"config-name": "hbase_regionserver_heapsize", "item": regionServerItem}])
-
-      minMasterXmn = 0.12 * hbase_master_heapsize
-      maxMasterXmn = 0.2 * hbase_master_heapsize
-      if hbase_master_xmn_size < minMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                         "(12% of hbase_master_heapsize)".format(int(ceil(minMasterXmn))))
-
-      if hbase_master_xmn_size > maxMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                         "(20% of hbase_master_heapsize)".format(int(floor(maxMasterXmn))))
-
-      minRegionServerXmn = 0.12 * hbase_regionserver_heapsize
-      maxRegionServerXmn = 0.2 * hbase_regionserver_heapsize
-      if hbase_regionserver_xmn_size < minRegionServerXmn:
-        regionServerXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                               "(12% of hbase_regionserver_heapsize)"
-                                               .format(int(ceil(minRegionServerXmn))))
-
-      if hbase_regionserver_xmn_size > maxRegionServerXmn:
-        regionServerXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                               "(20% of hbase_regionserver_heapsize)"
-                                               .format(int(floor(maxRegionServerXmn))))
-    else:
-
-      if not hbaseMasterHeapsizeItem and (hbase_master_heapsize + hbase_regionserver_heapsize) > 32768:
-        hbaseMasterHeapsizeItem = self.getWarnItem("Value of Master + Regionserver heapsize is more than the recommended maximum heap size of 32G.")
-        validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
-
-      minMasterXmn = 0.12 * (hbase_master_heapsize + hbase_regionserver_heapsize)
-      maxMasterXmn = 0.2 *  (hbase_master_heapsize + hbase_regionserver_heapsize)
-      if hbase_master_xmn_size < minMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is lesser than the recommended minimum Xmn size of {0} "
-                                         "(12% of hbase_master_heapsize + hbase_regionserver_heapsize)"
-                                         .format(int(ceil(minMasterXmn))))
-
-      if hbase_master_xmn_size > maxMasterXmn:
-        masterXmnItem = self.getWarnItem("Value is greater than the recommended maximum Xmn size of {0} "
-                                         "(20% of hbase_master_heapsize + hbase_regionserver_heapsize)"
-                                         .format(int(floor(maxMasterXmn))))
-    if masterXmnItem:
-      validationItems.extend([{"config-name": "hbase_master_xmn_size", "item": masterXmnItem}])
-
-    if regionServerXmnItem:
-      validationItems.extend([{"config-name": "regionserver_xmn_size", "item": regionServerXmnItem}])
-
-    if hbaseMasterHeapsizeItem is None:
-      hostMasterComponents = {}
-
-      for service in services["services"]:
-        for component in service["components"]:
-          if component["StackServiceComponents"]["hostnames"] is not None:
-            for hostName in component["StackServiceComponents"]["hostnames"]:
-              if self.isMasterComponent(component):
-                if hostName not in hostMasterComponents.keys():
-                  hostMasterComponents[hostName] = []
-                hostMasterComponents[hostName].append(component["StackServiceComponents"]["component_name"])
-
-      amsCollectorHosts = self.getComponentHostNames(services, "AMBARI_METRICS", "METRICS_COLLECTOR")
-      for collectorHostName in amsCollectorHosts:
-        for host in hosts["items"]:
-          if host["Hosts"]["host_name"] == collectorHostName:
-            # AMS Collector co-hosted with other master components in bigger clusters
-            if len(hosts['items']) > 31 and \
-                            len(hostMasterComponents[collectorHostName]) > 2 and \
-                            host["Hosts"]["total_mem"] < 32*mb: # < 32Gb(total_mem in k)
-              masterHostMessage = "Host {0} is used by multiple master components ({1}). " \
-                                  "It is recommended to use a separate host for the " \
-                                  "Ambari Metrics Collector component and ensure " \
-                                  "the host has sufficient memory available."
-
-              hbaseMasterHeapsizeItem = self.getWarnItem(masterHostMessage.format(
-                  collectorHostName, str(", ".join(hostMasterComponents[collectorHostName]))))
-              if hbaseMasterHeapsizeItem:
-                validationItems.extend([{"config-name": "hbase_master_heapsize", "item": hbaseMasterHeapsizeItem}])
-      pass
 
-    return self.toConfigurationValidationProblems(validationItems, "ams-hbase-env")
-
-  def validateAmsEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
-
-    validationItems = []
-    collectorHeapsizeDefaultItem = self.validatorLessThenDefaultValue(properties, recommendedDefaults, "metrics_collector_heapsize")
-    validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeDefaultItem}])
 
-    ams_env = getSiteProperties(configurations, "ams-env")
-    collector_heapsize = self.to_number(ams_env.get("metrics_collector_heapsize"))
-    if collector_heapsize > 32768:
-      collectorHeapsizeMaxItem = self.getWarnItem("Value is more than the recommended maximum heap size of 32G.")
-      validationItems.extend([{"config-name": "metrics_collector_heapsize", "item": collectorHeapsizeMaxItem}])
 
-    return self.toConfigurationValidationProblems(validationItems, "ams-env")
 
   def getMemorySizeRequired(self, services, components, configurations):
     totalMemoryRequired = 512*1024*1024 # 512Mb for OS needs
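
For orientation, the Xmn checks removed above reduce to a single rule: the
new-generation size should stay between 12% and 20% of the relevant heap.
A minimal standalone sketch of that rule (illustrative names only, not part
of the StackAdvisor API):

  from math import ceil, floor

  def check_xmn_bounds(heapsize_mb, xmn_mb):
    # Warn when xmn_mb falls outside the 12%-20% band of heapsize_mb.
    min_xmn = 0.12 * heapsize_mb
    max_xmn = 0.2 * heapsize_mb
    if xmn_mb < min_xmn:
      return "Value is less than the recommended minimum Xmn size of %d" % int(ceil(min_xmn))
    if xmn_mb > max_xmn:
      return "Value is greater than the recommended maximum Xmn size of %d" % int(floor(max_xmn))
    return None

  # e.g. a 1024 MB master heap accepts an Xmn between 123 and 204 MB:
  assert check_xmn_bounds(1024, 128) is None
  assert check_xmn_bounds(1024, 64) is not None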

http://git-wip-us.apache.org/repos/asf/ambari/blob/0f32765d/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 2dc1738..4cb0d9e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -91,7 +91,6 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       "HBASE": self.recommendHBASEConfigurations,
       "MAPREDUCE2": self.recommendMapReduce2Configurations,
       "TEZ": self.recommendTezConfigurations,
-      "AMBARI_METRICS": self.recommendAmsConfigurations,
       "YARN": self.recommendYARNConfigurations,
       "STORM": self.recommendStormConfigurations,
       "KNOX": self.recommendKnoxConfigurations,


[24/50] [abbrv] ambari git commit: AMBARI-22125. Better defaults for druid memory configs to avoid OutOfMemoryErrors on small machines

Posted by jl...@apache.org.
AMBARI-22125. Better defaults for druid memory configs to avoid OutOfMemoryErrors on small machines


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1da77356
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1da77356
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1da77356

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 1da77356b0de98df51f48bedc087daf01aedbd7c
Parents: d86f764
Author: Nishant <ni...@gmail.com>
Authored: Thu Oct 5 01:21:00 2017 +0530
Committer: Nishant <ni...@gmail.com>
Committed: Thu Oct 5 01:21:00 2017 +0530

----------------------------------------------------------------------
 .../DRUID/0.9.2/configuration/druid-broker.xml  |   6 +
 .../0.9.2/configuration/druid-historical.xml    |   6 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |  28 ++-
 .../stacks/2.6/common/test_stack_advisor.py     | 190 ++++++++++++++++++-
 4 files changed, 219 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1da77356/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
index 4f05da0..6146ca3 100644
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
@@ -69,6 +69,12 @@
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
+    <name>druid.processing.numMergeBuffers</name>
+    <value>2</value>
+    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>druid.broker.cache.useCache</name>
     <value>true</value>
     <description>Enable the cache on the broker.</description>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1da77356/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
index 9b65404..5ff30ce 100644
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
@@ -39,6 +39,12 @@
     <on-ambari-upgrade add="false"/>
   </property>
   <property>
+    <name>druid.processing.numMergeBuffers</name>
+    <value>2</value>
+    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
     <name>druid.processing.buffer.sizeBytes</name>
     <value>1073741824</value>
     <value-attributes>
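
Context for the new property: Druid allocates its processing buffers from
direct memory, and per Druid's sizing guidance the JVM needs roughly
druid.processing.buffer.sizeBytes * (druid.processing.numThreads +
druid.processing.numMergeBuffers + 1) bytes of direct memory available.
A quick back-of-envelope check using the defaults in this commit
(numThreads = 3 is an assumed example value, not from the diff):

  buffer_size_bytes = 1073741824  # druid.processing.buffer.sizeBytes
  num_merge_buffers = 2           # druid.processing.numMergeBuffers
  num_threads = 3                 # example druid.processing.numThreads
  needed = buffer_size_bytes * (num_threads + num_merge_buffers + 1)
  print(needed / (1024 ** 3))     # -> 6 GiB of -XX:MaxDirectMemorySize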

http://git-wip-us.apache.org/repos/asf/ambari/blob/1da77356/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 723ff4e..0d2925e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -16,9 +16,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 """
-import math
-
 import json
+import math
 import re
 from resource_management.libraries.functions import format
 
@@ -144,7 +143,7 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
       # JVM Configs go to env properties
       putEnvProperty = self.putProperty(configurations, "druid-env", services)
 
-      # processing thread pool Config
+      # processing thread pool and memory configs
       for component in ['DRUID_HISTORICAL', 'DRUID_BROKER']:
           component_hosts = self.getHostsWithComponent("DRUID", component, services, hosts)
           nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
@@ -154,8 +153,31 @@ class HDP26StackAdvisor(HDP25StackAdvisor):
               processingThreads = 1
               if totalAvailableCpu > 1:
                   processingThreads = totalAvailableCpu - 1
+              numMergeBuffers = max(2, processingThreads/4)
               putComponentProperty('druid.processing.numThreads', processingThreads)
               putComponentProperty('druid.server.http.numThreads', max(10, (totalAvailableCpu * 17) / 16 + 2) + 30)
+              putComponentProperty('druid.processing.numMergeBuffers', numMergeBuffers)
+              totalAvailableMemInMb = self.getMinMemory(component_hosts) / 1024
+              maxAvailableBufferSizeInMb = totalAvailableMemInMb/(processingThreads + numMergeBuffers)
+              putComponentProperty('druid.processing.buffer.sizeBytes', self.getDruidProcessingBufferSizeInMb(totalAvailableMemInMb) * 1024 * 1024)
+
+
+  # returns the recommended druid processing buffer size in Mb.
+  # the recommended buffer size is kept lower than the max available memory to have enough free memory to load druid data.
+  # for low memory nodes, the actual allocated buffer size is small to keep some free memory for memory mapping of segments.
+  # If user installs all druid processes on a single node, memory available for loading segments will be further decreased.
+  def getDruidProcessingBufferSizeInMb(self, maxAvailableBufferSizeInMb):
+      if maxAvailableBufferSizeInMb <= 256:
+          return min(64, maxAvailableBufferSizeInMb)
+      elif maxAvailableBufferSizeInMb <= 1024:
+          return 128
+      elif maxAvailableBufferSizeInMb <= 2048:
+          return 256
+      elif maxAvailableBufferSizeInMb <= 6144:
+          return 512
+      # High Memory nodes below
+      else:
+          return 1024
 
   def recommendSupersetConfigurations(self, configurations, clusterData, services, hosts):
       # superset is in list of services to be installed
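
The tiering in getDruidProcessingBufferSizeInMb maps available memory to a
buffer size as follows; a standalone restatement of the branches above,
usable as a paper check (not the advisor class itself):

  def buffer_size_mb(max_available_mb):
    if max_available_mb <= 256:
      return min(64, max_available_mb)
    elif max_available_mb <= 1024:
      return 128
    elif max_available_mb <= 2048:
      return 256
    elif max_available_mb <= 6144:
      return 512
    else:  # high-memory nodes
      return 1024

  assert buffer_size_mb(100) == 64     # low-memory host
  assert buffer_size_mb(1024) == 128
  assert buffer_size_mb(4096) == 512
  assert buffer_size_mb(8192) == 1024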

http://git-wip-us.apache.org/repos/asf/ambari/blob/1da77356/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
index f8483c1..ec44b3d 100644
--- a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -188,9 +188,15 @@ class TestHDP26StackAdvisor(TestCase):
     self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations,
                       {'druid-historical': {
-                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'properties': {'druid.processing.numThreads': '3',
+                                       'druid.server.http.numThreads': '40',
+                                       'druid.processing.numMergeBuffers': '2',
+                                       'druid.processing.buffer.sizeBytes': '1073741824'}},
                         'druid-broker': {
-                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                          'properties': {'druid.processing.numThreads': '3',
+                                         'druid.server.http.numThreads': '40',
+                                         'druid.processing.numMergeBuffers': '2',
+                                         'druid.processing.buffer.sizeBytes': '1073741824'}},
                         'druid-common': {'properties': {'druid.extensions.loadList': '["mysql-metadata-storage"]',
                                                         'druid.metadata.storage.connector.port': '3306',
                                                         'druid.metadata.storage.connector.connectURI': 'jdbc:mysql://c6401.ambari.apache.org:3306/druid?createDatabaseIfNotExist=true',
@@ -767,9 +773,15 @@ class TestHDP26StackAdvisor(TestCase):
     self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations,
                       {'druid-historical': {
-                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'properties': {'druid.processing.numThreads': '3',
+                                       'druid.server.http.numThreads': '40',
+                                       'druid.processing.numMergeBuffers': '2',
+                                       'druid.processing.buffer.sizeBytes': '1073741824'}},
                         'druid-broker': {
-                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                          'properties': {'druid.processing.numThreads': '3',
+                                         'druid.server.http.numThreads': '40',
+                                         'druid.processing.numMergeBuffers': '2',
+                                         'druid.processing.buffer.sizeBytes': '1073741824'}},
                         'druid-common': {'properties': {'druid.extensions.loadList': '["postgresql-metadata-storage"]',
                                                         'druid.metadata.storage.connector.port': '5432',
                                                         'druid.metadata.storage.connector.connectURI': 'jdbc:postgresql://c6401.ambari.apache.org:5432/druid',
@@ -875,9 +887,15 @@ class TestHDP26StackAdvisor(TestCase):
     self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations,
                       {'druid-historical': {
-                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'properties': {'druid.processing.numThreads': '3',
+                                       'druid.server.http.numThreads': '40',
+                                       'druid.processing.numMergeBuffers': '2',
+                                       'druid.processing.buffer.sizeBytes': '1073741824'}},
                         'druid-broker': {
-                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                          'properties': {'druid.processing.numThreads': '3',
+                                         'druid.server.http.numThreads': '40',
+                                         'druid.processing.numMergeBuffers': '2',
+                                         'druid.processing.buffer.sizeBytes': '1073741824'}},
                         'druid-common': {'properties': {'druid.extensions.loadList': '[]',
                                                         'druid.metadata.storage.connector.port': '1527',
                                                         'druid.metadata.storage.connector.connectURI': 'jdbc:derby://c6401.ambari.apache.org:1527/druid;create=true',
@@ -892,6 +910,7 @@ class TestHDP26StackAdvisor(TestCase):
                       )
 
 
+
   def test_recommendDruidConfigurations_property_existence_check(self):
       # Test for https://issues.apache.org/jira/browse/AMBARI-19144
       hosts = {
@@ -1069,9 +1088,15 @@ class TestHDP26StackAdvisor(TestCase):
     self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
     self.assertEquals(configurations,
                       {'druid-historical': {
-                        'properties': {'druid.processing.numThreads': '2', 'druid.server.http.numThreads': '40'}},
+                        'properties': {'druid.processing.numThreads': '2',
+                                       'druid.server.http.numThreads': '40',
+                                       'druid.processing.numMergeBuffers': '2',
+                                       'druid.processing.buffer.sizeBytes': '536870912'}},
                         'druid-broker': {
-                          'properties': {'druid.processing.numThreads': '1', 'druid.server.http.numThreads': '40'}},
+                          'properties': {'druid.processing.numThreads': '1',
+                                         'druid.server.http.numThreads': '40',
+                                         'druid.processing.numMergeBuffers': '2',
+                                         'druid.processing.buffer.sizeBytes': '268435456'}},
                         'druid-common': {'properties': {'druid.extensions.loadList': '[]',
                                                         'druid.metadata.storage.connector.port': '1527',
                                                         'druid.metadata.storage.connector.connectURI': 'jdbc:derby://c6401.ambari.apache.org:1527/druid;create=true',
@@ -1086,6 +1111,155 @@ class TestHDP26StackAdvisor(TestCase):
                                                               'druid.broker.jvm.heap.memory': {'maximum': '1877'}}}}
                       )
 
+  def test_recommendDruidConfigurations_low_mem_hosts(self):
+    hosts = {
+      "items": [
+        {
+          "href": "/api/v1/hosts/c6401.ambari.apache.org",
+          "Hosts": {
+            "cpu_count": 8,
+            "total_mem": 102400,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          }
+        }, {
+          "href": "/api/v1/hosts/c6402.ambari.apache.org",
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 204800,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          }
+        },
+        {
+          "href": "/api/v1/hosts/c6403.ambari.apache.org",
+          "Hosts": {
+            "cpu_count": 6,
+            "total_mem": 409600,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6403.ambari.apache.org",
+            "host_name": "c6403.ambari.apache.org"
+          }
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [{
+        "StackServices": {
+          "service_name": "DRUID",
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_COORDINATOR",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_OVERLORD",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_BROKER",
+              "hostnames": ["c6402.ambari.apache.org", "c6403.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_HISTORICAL",
+              "hostnames": ["c6401.ambari.apache.org", "c6403.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_MIDDLEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          }
+        ]
+      }
+      ],
+      "configurations": {
+        "druid-common": {
+          "properties": {
+            "database_name": "druid",
+            "metastore_hostname": "c6401.ambari.apache.org",
+            "druid.metadata.storage.type": "derby",
+            "druid.extensions.loadList": "[\"mysql-metadata-storage\"]",
+            "druid.extensions.pullList": "[]"
+          }
+        }
+      }
+    }
+
+    clusterData = {
+    }
+
+    configurations = {
+    }
+
+    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,
+                    {'druid-historical': {
+                      'properties': {'druid.processing.numThreads': '5',
+                                     'druid.server.http.numThreads': '40',
+                                     'druid.processing.numMergeBuffers': '2',
+                                     'druid.processing.buffer.sizeBytes': '67108864'}},
+                      'druid-broker': {
+                        'properties': {'druid.processing.numThreads': '3',
+                                       'druid.server.http.numThreads': '40',
+                                       'druid.processing.numMergeBuffers': '2',
+                                       'druid.processing.buffer.sizeBytes': '67108864'}},
+                      'druid-common': {'properties': {'druid.extensions.loadList': '[]',
+                                                      'druid.metadata.storage.connector.port': '1527',
+                                                      'druid.metadata.storage.connector.connectURI': 'jdbc:derby://c6401.ambari.apache.org:1527/druid;create=true',
+                                                      'druid.zk.service.host': ''
+                                                      }},
+                      'druid-env': {'properties': {},
+                                    'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '1024'},
+                                                            'druid.overlord.jvm.heap.memory': {'maximum': '1024'},
+                                                            'druid.middlemanager.jvm.heap.memory': {
+                                                              'maximum': '1024'},
+                                                            'druid.historical.jvm.heap.memory': {'maximum': '1024'},
+                                                            'druid.broker.jvm.heap.memory': {'maximum': '1024'}}}}
+                    )
+
 
   def test_recommendAtlasConfigurations(self):
     configurations = {
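
To see where the low-mem expectation of 67108864 comes from: the historical
hosts above (c6401, c6403) report total_mem of 102400 and 409600; the advisor
takes the minimum and divides by 1024 (so total_mem is evidently in KB),
lands in the <= 256 MB tier of getDruidProcessingBufferSizeInMb, and returns
min(64, 100) = 64 MB. A short trace:

  min_mem_kb = min(102400, 409600)   # historical hosts c6401 / c6403
  total_mem_mb = min_mem_kb / 1024   # -> 100 MB
  buffer_mb = min(64, total_mem_mb)  # <= 256 MB tier -> 64 MB
  assert buffer_mb * 1024 * 1024 == 67108864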


[50/50] [abbrv] ambari git commit: Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714

Posted by jl...@apache.org.
Merge remote-tracking branch 'origin/trunk' into branch-feature-AMBARI-14714


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c36afcdd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c36afcdd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c36afcdd

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: c36afcdd051fa28cff2d299f1b97ed92e32fbbe5
Parents: ba1ec6d e61556c
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Sun Oct 8 23:00:20 2017 -0700
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Sun Oct 8 23:00:20 2017 -0700

----------------------------------------------------------------------
 ambari-agent/pom.xml                            |   4 +-
 .../main/python/ambari_agent/AmbariConfig.py    |  74 +-
 .../ambari_agent/CustomServiceOrchestrator.py   |   6 +-
 .../src/main/python/ambari_agent/FileCache.py   |   5 +-
 .../main/python/ambari_agent/ProcessHelper.py   |  71 --
 .../src/main/python/ambari_agent/StatusCheck.py | 142 ----
 .../src/main/python/ambari_agent/main.py        |  12 +-
 .../test/python/ambari_agent/TestFileCache.py   |   4 +-
 .../test/python/ambari_agent/TestLiveStatus.py  |   5 +-
 .../src/test/python/ambari_agent/TestMain.py    |  13 +-
 .../python/ambari_agent/TestProcessHelper.py    |  70 --
 .../test/python/ambari_agent/TestSecurity.py    |   1 -
 .../test/python/ambari_agent/TestStatusCheck.py | 180 -----
 .../resource_management/TestPackageResource.py  |   6 +-
 .../python/resource_management/TestScript.py    |  26 +-
 .../core/providers/package/__init__.py          |   4 +-
 .../core/providers/package/apt.py               |  35 +-
 .../core/providers/package/choco.py             |   8 +-
 .../core/providers/package/yumrpm.py            |  10 +-
 .../core/providers/package/zypper.py            |   7 +-
 .../core/resources/packaging.py                 |  14 +-
 .../libraries/functions/component_version.py    |  26 +-
 .../libraries/functions/conf_select.py          | 362 +++------
 .../libraries/functions/repository_util.py      |   8 +-
 .../libraries/functions/stack_select.py         |  69 +-
 .../libraries/script/script.py                  |  22 +-
 ambari-infra/ambari-infra-assembly/pom.xml      |   8 +
 .../src/main/package/deb/manager/postinst       |   5 +
 .../src/main/package/deb/solr-client/postinst   |  13 +
 .../src/main/package/rpm/manager/postinstall.sh |  20 +
 .../main/package/rpm/solr-client/postinstall.sh |  28 +
 .../org/apache/ambari/infra/InfraManager.java   |   4 +-
 .../conf/batch/InfraManagerBatchConfig.java     |  86 --
 .../infra/job/dummy/DummyJobConfiguration.java  | 108 +++
 .../src/main/resources/infraManager.sh          |  10 +-
 ambari-infra/ambari-infra-solr-client/build.xml |   3 +
 .../src/main/python/solrDataManager.py          | 150 ++--
 .../src/main/resources/solrIndexHelper.sh       |   5 +-
 .../logsearch/steps/LogSearchUISteps.java       |   4 +-
 .../resources/stories/selenium/login.ui.story   |   4 +-
 .../ambari-logsearch-web/package.json           |  17 +-
 ambari-logsearch/ambari-logsearch-web/pom.xml   |   6 +-
 .../ambari-logsearch-web/src/app/app.module.ts  |   6 +
 .../src/app/classes/active-service-log-entry.ts |  23 +
 .../src/app/classes/histogram-options.ts        |  36 +
 .../src/app/classes/list-item.class.ts          |  25 -
 .../src/app/classes/list-item.ts                |  26 +
 .../src/app/classes/models/app-settings.ts      |  27 +
 .../src/app/classes/models/app-state.ts         |  43 +
 .../src/app/classes/models/audit-log-field.ts   | 225 ++++++
 .../src/app/classes/models/audit-log.ts         |  46 ++
 .../src/app/classes/models/bar-graph.ts         |  24 +
 .../src/app/classes/models/common-entry.ts      |  22 +
 .../src/app/classes/models/count.ts             |  22 +
 .../src/app/classes/models/filter.ts            |  25 +
 .../src/app/classes/models/graph.ts             |  23 +
 .../src/app/classes/models/log-field.ts         |  27 +
 .../src/app/classes/models/log.ts               |  38 +
 .../src/app/classes/models/node.ts              |  30 +
 .../src/app/classes/models/service-log-field.ts | 107 +++
 .../src/app/classes/models/service-log.ts       |  27 +
 .../app/classes/models/solr-collection-state.ts |  23 +
 .../src/app/classes/models/store.ts             | 180 +++++
 .../src/app/classes/models/user-config.ts       |  26 +
 .../queries/audit-logs-query-params.class.ts    |  46 --
 .../classes/queries/audit-logs-query-params.ts  |  46 ++
 .../app/classes/queries/query-params.class.ts   |  23 -
 .../src/app/classes/queries/query-params.ts     |  23 +
 ...ce-logs-histogram-query-params.class.spec.ts | 203 -----
 ...service-logs-histogram-query-params.class.ts |  70 --
 .../service-logs-histogram-query-params.spec.ts | 203 +++++
 .../service-logs-histogram-query-params.ts      |  70 ++
 .../queries/service-logs-query-params.class.ts  |  30 -
 .../queries/service-logs-query-params.ts        |  30 +
 .../service-logs-truncated-query-params.ts      |  36 +
 .../app/classes/service-log-context-entry.ts    |  26 +
 .../dropdown-button.component.html              |   3 +-
 .../dropdown-button.component.spec.ts           |   5 +-
 .../dropdown-button.component.ts                |   2 +-
 .../dropdown-list/dropdown-list.component.html  |   2 +-
 .../dropdown-list.component.spec.ts             |  12 +-
 .../dropdown-list/dropdown-list.component.ts    |  11 +-
 .../filter-button.component.spec.ts             |   5 +-
 .../filter-button/filter-button.component.ts    |   2 +-
 .../filter-dropdown.component.spec.ts           |   5 +-
 .../filters-panel/filters-panel.component.html  |   3 +-
 .../filters-panel/filters-panel.component.less  |   2 +-
 .../filters-panel.component.spec.ts             |   5 +-
 .../filters-panel/filters-panel.component.ts    |   4 +-
 .../log-context/log-context.component.html      |  33 +
 .../log-context/log-context.component.less      |  23 +
 .../log-context/log-context.component.spec.ts   | 108 +++
 .../log-context/log-context.component.ts        |  91 +++
 .../log-file-entry.component.html               |  20 +
 .../log-file-entry.component.less               |  31 +
 .../log-file-entry.component.spec.ts            |  56 ++
 .../log-file-entry/log-file-entry.component.ts  |  51 ++
 .../logs-container.component.html               |  12 +-
 .../logs-container.component.spec.ts            |   5 +-
 .../logs-container/logs-container.component.ts  |  44 +-
 .../logs-list/logs-list.component.html          |  30 +-
 .../logs-list/logs-list.component.less          |  36 +-
 .../logs-list/logs-list.component.spec.ts       |   3 +
 .../components/logs-list/logs-list.component.ts |  34 +-
 .../main-container.component.html               |   7 +
 .../main-container.component.less               |   4 +
 .../main-container.component.spec.ts            |  13 +-
 .../main-container/main-container.component.ts  |  32 +-
 .../menu-button/menu-button.component.spec.ts   |   5 +-
 .../menu-button/menu-button.component.ts        |   2 +-
 .../search-box/search-box.component.ts          |   2 +-
 .../time-histogram.component.less               |  22 +-
 .../time-histogram/time-histogram.component.ts  |  94 ++-
 .../timezone-picker.component.spec.ts           |   5 +-
 .../src/app/components/variables.less           |  30 +
 .../src/app/models/app-settings.model.ts        |  27 -
 .../src/app/models/app-state.model.ts           |  35 -
 .../src/app/models/audit-log-field.model.ts     | 225 ------
 .../src/app/models/audit-log.model.ts           |  46 --
 .../src/app/models/bar-graph.model.ts           |  24 -
 .../src/app/models/common-entry.model.ts        |  22 -
 .../src/app/models/count.model.ts               |  22 -
 .../src/app/models/filter.model.ts              |  25 -
 .../src/app/models/graph.model.ts               |  23 -
 .../src/app/models/log-field.model.ts           |  27 -
 .../src/app/models/log.model.ts                 |  37 -
 .../src/app/models/node.model.ts                |  29 -
 .../src/app/models/service-log-field.model.ts   | 107 ---
 .../src/app/models/service-log.model.ts         |  27 -
 .../app/models/solr-collection-state.model.ts   |  23 -
 .../src/app/models/store.model.ts               | 169 ----
 .../src/app/models/user-config.model.ts         |  26 -
 .../services/component-actions.service.spec.ts  |   8 +-
 .../app/services/component-actions.service.ts   |  52 +-
 .../component-generator.service.spec.ts         |  10 +-
 .../app/services/component-generator.service.ts |  23 +-
 .../src/app/services/filtering.service.spec.ts  |  27 +
 .../src/app/services/filtering.service.ts       |  97 ++-
 .../src/app/services/http-client.service.ts     |  15 +-
 .../app/services/logs-container.service.spec.ts |   8 +-
 .../src/app/services/logs-container.service.ts  |  70 +-
 .../services/storage/app-settings.service.ts    |   4 +-
 .../app/services/storage/app-state.service.ts   |   4 +-
 .../storage/audit-logs-fields.service.ts        |   2 +-
 .../app/services/storage/audit-logs.service.ts  |   2 +-
 .../app/services/storage/clusters.service.ts    |   2 +-
 .../app/services/storage/components.service.ts  |   2 +-
 .../src/app/services/storage/filters.service.ts |   2 +-
 .../src/app/services/storage/graphs.service.ts  |   2 +-
 .../src/app/services/storage/hosts.service.ts   |   2 +-
 .../app/services/storage/reducers.service.ts    |   2 +
 .../storage/service-logs-fields.service.ts      |   2 +-
 .../service-logs-histogram-data.service.ts      |   2 +-
 .../storage/service-logs-truncated.service.ts   |  32 +
 .../services/storage/service-logs.service.ts    |   2 +-
 .../services/storage/user-configs.service.ts    |   2 +-
 .../src/assets/i18n/en.json                     |   7 +-
 .../ambari-logsearch-web/webpack.config.js      |  21 +-
 ambari-logsearch/ambari-logsearch-web/yarn.lock | 288 +++++--
 .../timeline/HBaseTimelineMetricStore.java      |   8 +-
 .../timeline/HBaseTimelineMetricStoreTest.java  |  36 +-
 ambari-server/pom.xml                           |   2 +
 ambari-server/src/main/assemblies/server.xml    |  20 +-
 .../actionmanager/ExecutionCommandWrapper.java  |   3 +-
 .../ambari/server/actionmanager/Stage.java      |   3 +-
 .../ambari/server/agent/HeartbeatMonitor.java   |   3 +-
 .../ambari/server/agent/StatusCommand.java      |  16 +
 .../server/api/services/AmbariMetaInfo.java     |  61 +-
 .../AmbariManagementControllerImpl.java         |  22 +-
 .../server/controller/ControllerModule.java     |   4 +
 .../server/controller/KerberosHelperImpl.java   |  16 +-
 .../controller/ResourceProviderFactory.java     |   8 +
 .../server/controller/StackVersionResponse.java |  29 -
 .../AbstractControllerResourceProvider.java     |   4 +
 .../internal/AlertTargetResourceProvider.java   |   3 +-
 .../internal/ClientConfigResourceProvider.java  |   2 -
 .../internal/DefaultProviderModule.java         |   4 -
 .../internal/StackArtifactResourceProvider.java |  35 +-
 .../internal/UpgradeResourceProvider.java       |  77 +-
 .../internal/ViewInstanceResourceProvider.java  | 147 ++--
 .../upgrade/HostVersionOutOfSyncListener.java   |   4 +
 .../ambari/server/mpack/MpackGenerator.java     |  17 -
 .../orm/entities/RepositoryVersionEntity.java   |  34 +-
 .../server/orm/entities/WidgetLayoutEntity.java |   6 +-
 .../upgrades/AbstractUpgradeServerAction.java   |   6 +-
 .../upgrades/AutoSkipFailedSummaryAction.java   |  15 +-
 .../upgrades/ComponentVersionCheckAction.java   |   2 +-
 .../serveraction/upgrades/ConfigureAction.java  |  11 +-
 .../upgrades/FinalizeUpgradeAction.java         |   7 +-
 .../FixCapacitySchedulerOrderingPolicy.java     |  12 +-
 .../serveraction/upgrades/FixLzoCodecPath.java  |  10 +-
 .../upgrades/FixOozieAdminUsers.java            |  10 +-
 .../upgrades/FixYarnWebServiceUrl.java          |  11 +-
 .../upgrades/HBaseConfigCalculation.java        |  10 +-
 .../HBaseEnvMaxDirectMemorySizeAction.java      |  11 +-
 .../upgrades/HiveEnvClasspathAction.java        |  11 +-
 .../upgrades/HiveZKQuorumConfigAction.java      |  13 +-
 .../upgrades/KerberosKeytabsAction.java         |  13 +-
 .../upgrades/ManualStageAction.java             |   3 +-
 .../upgrades/OozieConfigCalculation.java        |  11 +-
 .../upgrades/PreconfigureKerberosAction.java    |   2 +-
 .../upgrades/RangerConfigCalculation.java       |  11 +-
 .../RangerKerberosConfigCalculation.java        |  11 +-
 .../upgrades/RangerKmsProxyConfig.java          |  11 +-
 .../RangerUsersyncConfigCalculation.java        |  11 +-
 .../upgrades/RangerWebAlertConfigAction.java    |   9 +-
 .../upgrades/SparkShufflePropertyConfig.java    |  11 +-
 .../upgrades/UpdateDesiredRepositoryAction.java |  10 +-
 .../upgrades/UpgradeUserKerberosDescriptor.java |   6 +-
 .../upgrades/YarnConfigCalculation.java         |  11 +-
 .../ambari/server/stack/ServiceDirectory.java   |   3 +-
 .../ambari/server/stack/StackContext.java       |  15 +-
 .../ambari/server/stack/StackDirectory.java     |  74 +-
 .../ambari/server/stack/StackManager.java       |  11 +-
 .../apache/ambari/server/stack/StackModule.java |  16 -
 .../org/apache/ambari/server/state/Host.java    |  17 +
 .../apache/ambari/server/state/ServiceInfo.java |   2 +-
 .../apache/ambari/server/state/StackInfo.java   |  39 -
 .../server/state/cluster/ClusterImpl.java       |   2 +-
 .../ambari/server/state/host/HostImpl.java      |  47 ++
 .../AbstractKerberosDescriptorContainer.java    |  26 +-
 .../state/kerberos/KerberosDescriptor.java      |   3 +-
 .../ambari/server/state/stack/UpgradePack.java  |   6 +
 .../stack/upgrade/ServerSideActionTask.java     |  15 +
 .../state/stack/upgrade/TaskParameter.java      |  41 +
 .../svccomphost/ServiceComponentHostImpl.java   |   9 +-
 .../server/topology/AsyncCallableService.java   | 110 ++-
 .../ambari/server/topology/TopologyManager.java |  26 +-
 .../topology/tasks/ConfigureClusterTask.java    | 124 +--
 .../server/upgrade/UpgradeCatalog260.java       |  31 +
 .../ambari_server/dbConfiguration_linux.py      |   9 +-
 .../python/ambari_server/resourceFilesKeeper.py |   7 +-
 .../python/ambari_server/serverConfiguration.py |   2 +-
 .../main/python/ambari_server/setupMpacks.py    |   2 +-
 .../1.6.1.2.2.0/configuration/accumulo-env.xml  |   6 +
 .../0.1.0/configuration/infra-solr-env.xml      |   6 +
 .../0.1.0/configuration/ams-env.xml             |   6 +
 .../AMBARI_METRICS/0.1.0/service_advisor.py     | 787 +++++++++++++++++++
 .../ATLAS/0.1.0.2.3/configuration/atlas-env.xml |   6 +
 .../ATLAS/0.7.0.3.0/configuration/atlas-env.xml |   6 +
 .../ATLAS/0.7.0.3.0/service_advisor.py          |   5 +-
 .../DRUID/0.10.1/configuration/druid-broker.xml | 106 +++
 .../DRUID/0.10.1/configuration/druid-common.xml | 270 +++++++
 .../0.10.1/configuration/druid-coordinator.xml  |  43 +
 .../DRUID/0.10.1/configuration/druid-env.xml    | 248 ++++++
 .../0.10.1/configuration/druid-historical.xml   |  94 +++
 .../DRUID/0.10.1/configuration/druid-log4j.xml  |  84 ++
 .../0.10.1/configuration/druid-logrotate.xml    |  68 ++
 .../configuration/druid-middlemanager.xml       | 122 +++
 .../0.10.1/configuration/druid-overlord.xml     |  52 ++
 .../DRUID/0.10.1/configuration/druid-router.xml |  59 ++
 .../common-services/DRUID/0.10.1/metainfo.xml   | 223 ++++++
 .../DRUID/0.10.1/package/scripts/broker.py      |  28 +
 .../DRUID/0.10.1/package/scripts/coordinator.py |  28 +
 .../DRUID/0.10.1/package/scripts/druid.py       | 307 ++++++++
 .../DRUID/0.10.1/package/scripts/druid_node.py  | 114 +++
 .../DRUID/0.10.1/package/scripts/historical.py  |  28 +
 .../0.10.1/package/scripts/middlemanager.py     |  28 +
 .../DRUID/0.10.1/package/scripts/overlord.py    |  28 +
 .../DRUID/0.10.1/package/scripts/params.py      | 200 +++++
 .../DRUID/0.10.1/package/scripts/router.py      |  28 +
 .../0.10.1/package/scripts/service_check.py     |  44 ++
 .../0.10.1/package/scripts/status_params.py     |  24 +
 .../DRUID/0.10.1/quicklinks/quicklinks.json     |  37 +
 .../DRUID/0.10.1/role_command_order.json        |  17 +
 .../DRUID/0.10.1/themes/theme.json              | 120 +++
 .../DRUID/0.9.2/configuration/druid-broker.xml  | 100 ---
 .../DRUID/0.9.2/configuration/druid-common.xml  | 270 -------
 .../0.9.2/configuration/druid-coordinator.xml   |  43 -
 .../DRUID/0.9.2/configuration/druid-env.xml     | 242 ------
 .../0.9.2/configuration/druid-historical.xml    |  88 ---
 .../DRUID/0.9.2/configuration/druid-log4j.xml   |  84 --
 .../0.9.2/configuration/druid-logrotate.xml     |  68 --
 .../0.9.2/configuration/druid-middlemanager.xml | 122 ---
 .../0.9.2/configuration/druid-overlord.xml      |  52 --
 .../DRUID/0.9.2/configuration/druid-router.xml  |  59 --
 .../common-services/DRUID/0.9.2/metainfo.xml    | 223 ------
 .../DRUID/0.9.2/package/scripts/broker.py       |  28 -
 .../DRUID/0.9.2/package/scripts/coordinator.py  |  28 -
 .../DRUID/0.9.2/package/scripts/druid.py        | 307 --------
 .../DRUID/0.9.2/package/scripts/druid_node.py   | 114 ---
 .../DRUID/0.9.2/package/scripts/historical.py   |  28 -
 .../0.9.2/package/scripts/middlemanager.py      |  28 -
 .../DRUID/0.9.2/package/scripts/overlord.py     |  28 -
 .../DRUID/0.9.2/package/scripts/params.py       | 200 -----
 .../DRUID/0.9.2/package/scripts/router.py       |  28 -
 .../0.9.2/package/scripts/service_check.py      |  44 --
 .../0.9.2/package/scripts/status_params.py      |  24 -
 .../DRUID/0.9.2/quicklinks/quicklinks.json      |  37 -
 .../DRUID/0.9.2/role_command_order.json         |  17 -
 .../DRUID/0.9.2/themes/theme.json               | 120 ---
 .../0.5.0.2.1/configuration/falcon-env.xml      |   6 +
 .../FLUME/1.4.0.2.0/configuration/flume-env.xml |   6 +
 .../0.96.0.2.0/configuration/hbase-env.xml      |   6 +
 .../HBASE/2.0.0.3.0/configuration/hbase-env.xml |   6 +
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  10 +
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml  |   1 +
 .../common-services/HDFS/2.1.0.2.0/widgets.json |   4 +-
 .../HDFS/3.0.0.3.0/configuration/hadoop-env.xml |  10 +
 .../HDFS/3.0.0.3.0/configuration/hdfs-site.xml  |   1 +
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml  |  12 +
 .../package/scripts/hive_server_upgrade.py      |   5 -
 .../0.12.0.2.0/package/scripts/params_linux.py  |   5 +-
 .../0.12.0.2.0/package/scripts/status_params.py |   8 +-
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py  |   2 +-
 .../HIVE/2.1.0.3.0/configuration/hive-env.xml   |  12 +
 .../0.10.0.3.0/configuration/kafka-env.xml      |   6 +
 .../KAFKA/0.8.1/configuration/kafka-env.xml     |   6 +
 .../KNOX/0.5.0.2.2/configuration/knox-env.xml   |  10 +
 .../KNOX/0.5.0.3.0/configuration/knox-env.xml   |  10 +
 .../0.5.0/configuration/logsearch-env.xml       |   6 +
 .../1.0.0.2.3/configuration/mahout-env.xml      |   6 +
 .../OOZIE/4.0.0.2.0/configuration/oozie-env.xml |  10 +
 .../OOZIE/4.2.0.3.0/configuration/oozie-env.xml |  10 +
 .../RANGER/0.4.0/configuration/ranger-env.xml   |  10 +
 .../0.4.0/package/scripts/ranger_admin.py       |  13 +-
 .../1.0.0.3.0/configuration/ranger-env.xml      |  10 +
 .../0.5.0.2.3/configuration/kms-env.xml         |  10 +
 .../1.0.0.3.0/configuration/kms-env.xml         |  10 +
 .../RANGER_KMS/1.0.0.3.0/service_advisor.py     |   3 +-
 .../SPARK/1.2.1/configuration/spark-env.xml     |  10 +
 .../SPARK/2.2.0/configuration/livy-env.xml      |  10 +
 .../SPARK/2.2.0/configuration/spark-env.xml     |  10 +
 .../SPARK2/2.0.0/configuration/spark2-env.xml   |  10 +
 .../SQOOP/1.4.4.2.0/configuration/sqoop-env.xml |   6 +
 .../SQOOP/1.4.4.3.0/configuration/sqoop-env.xml |   6 +
 .../STORM/0.9.1/configuration/storm-env.xml     |   6 +
 .../STORM/1.0.1.3.0/configuration/storm-env.xml |   6 +
 .../0.15.0/configuration/superset-env.xml       |   6 +
 .../TEZ/0.4.0.2.1/configuration/tez-env.xml     |  10 +
 .../TEZ/0.9.0.3.0/configuration/tez-env.xml     |  10 +
 .../configuration-mapred/mapred-env.xml         |   6 +
 .../YARN/2.1.0.2.0/configuration/yarn-env.xml   |   6 +
 .../configuration-mapred/mapred-env.xml         |   6 +
 .../YARN/3.0.0.3.0/configuration/yarn-env.xml   |   6 +
 .../0.6.0/configuration/zeppelin-env.xml        |  10 +
 .../0.7.0/configuration/zeppelin-env.xml        |  10 +
 .../ZEPPELIN/0.7.0/package/scripts/master.py    |  25 +-
 .../3.4.5/configuration/zookeeper-env.xml       |   6 +
 .../src/main/resources/configuration-schema.xsd |   2 +-
 .../custom_actions/scripts/install_packages.py  |  35 +-
 ambari-server/src/main/resources/kerberos.json  |  79 ++
 .../main/resources/scripts/Ambaripreupload.py   |   4 +-
 .../scripts/post-user-creation-hook.sh          |   2 +-
 .../stack-hooks/after-INSTALL/scripts/hook.py   |  37 +
 .../stack-hooks/after-INSTALL/scripts/params.py | 108 +++
 .../scripts/shared_initialization.py            | 132 ++++
 .../before-ANY/files/changeToSecureUid.sh       |  64 ++
 .../stack-hooks/before-ANY/scripts/hook.py      |  36 +
 .../stack-hooks/before-ANY/scripts/params.py    | 254 ++++++
 .../before-ANY/scripts/shared_initialization.py | 273 +++++++
 .../stack-hooks/before-INSTALL/scripts/hook.py  |  37 +
 .../before-INSTALL/scripts/params.py            | 115 +++
 .../scripts/repo_initialization.py              |  75 ++
 .../scripts/shared_initialization.py            |  37 +
 .../stack-hooks/before-RESTART/scripts/hook.py  |  29 +
 .../before-START/files/checkForFormat.sh        |  65 ++
 .../before-START/files/fast-hdfs-resource.jar   | Bin 0 -> 28296600 bytes
 .../before-START/files/task-log4j.properties    | 134 ++++
 .../before-START/files/topology_script.py       |  66 ++
 .../before-START/scripts/custom_extensions.py   | 173 ++++
 .../stack-hooks/before-START/scripts/hook.py    |  43 +
 .../stack-hooks/before-START/scripts/params.py  | 380 +++++++++
 .../before-START/scripts/rack_awareness.py      |  48 ++
 .../scripts/shared_initialization.py            | 256 ++++++
 .../templates/commons-logging.properties.j2     |  43 +
 .../templates/exclude_hosts_list.j2             |  21 +
 .../templates/hadoop-metrics2.properties.j2     | 107 +++
 .../before-START/templates/health_check.j2      |  81 ++
 .../templates/include_hosts_list.j2             |  21 +
 .../templates/topology_mappings.data.j2         |  24 +
 .../HDP/2.0.6/configuration/cluster-env.xml     |  10 +
 .../2.0.6/hooks/after-INSTALL/scripts/hook.py   |  37 -
 .../2.0.6/hooks/after-INSTALL/scripts/params.py | 115 ---
 .../scripts/shared_initialization.py            | 132 ----
 .../hooks/before-ANY/files/changeToSecureUid.sh |  64 --
 .../HDP/2.0.6/hooks/before-ANY/scripts/hook.py  |  36 -
 .../2.0.6/hooks/before-ANY/scripts/params.py    | 277 -------
 .../before-ANY/scripts/shared_initialization.py | 281 -------
 .../2.0.6/hooks/before-INSTALL/scripts/hook.py  |  37 -
 .../hooks/before-INSTALL/scripts/params.py      | 115 ---
 .../scripts/repo_initialization.py              |  75 --
 .../scripts/shared_initialization.py            |  37 -
 .../2.0.6/hooks/before-RESTART/scripts/hook.py  |  29 -
 .../hooks/before-START/files/checkForFormat.sh  |  65 --
 .../before-START/files/fast-hdfs-resource.jar   | Bin 28296600 -> 0 bytes
 .../before-START/files/task-log4j.properties    | 134 ----
 .../hooks/before-START/files/topology_script.py |  66 --
 .../before-START/scripts/custom_extensions.py   | 173 ----
 .../2.0.6/hooks/before-START/scripts/hook.py    |  43 -
 .../2.0.6/hooks/before-START/scripts/params.py  | 387 ---------
 .../before-START/scripts/rack_awareness.py      |  48 --
 .../scripts/shared_initialization.py            | 256 ------
 .../templates/commons-logging.properties.j2     |  43 -
 .../templates/exclude_hosts_list.j2             |  21 -
 .../templates/hadoop-metrics2.properties.j2     | 107 ---
 .../before-START/templates/health_check.j2      |  81 --
 .../templates/include_hosts_list.j2             |  21 -
 .../templates/topology_mappings.data.j2         |  24 -
 .../resources/stacks/HDP/2.0.6/kerberos.json    |  79 --
 .../stacks/HDP/2.0.6/services/stack_advisor.py  | 562 +------------
 .../resources/stacks/HDP/2.0.6/widgets.json     |  95 ---
 .../services/HBASE/configuration/hbase-env.xml  |   6 +
 .../stacks/HDP/2.2/services/stack_advisor.py    |   1 -
 .../services/ECS/package/scripts/ecs_client.py  |   2 +-
 .../services/ECS/package/scripts/params.py      |   2 +-
 .../services/ATLAS/configuration/atlas-env.xml  |   6 +
 .../HIVE/configuration/llap-daemon-log4j.xml    |   2 +-
 .../services/SPARK/configuration/livy-env.xml   |  10 +
 .../stacks/HDP/2.5/services/stack_advisor.py    |   3 +-
 .../stacks/HDP/2.6/kerberos_preconfigure.json   |   9 +
 .../stacks/HDP/2.6/services/DRUID/metainfo.xml  |   5 +-
 .../HIVE/configuration/tez-interactive-site.xml |   6 +
 .../services/SPARK/configuration/livy-env.xml   |  10 +
 .../services/YARN/configuration/yarn-site.xml   |  18 +
 .../ZEPPELIN/configuration/zeppelin-env.xml     |  10 +
 .../stacks/HDP/2.6/services/stack_advisor.py    |  28 +-
 .../HDP/3.0/configuration/cluster-env.xml       |  10 +
 .../HDP/3.0/hooks/after-INSTALL/scripts/hook.py |  37 -
 .../3.0/hooks/after-INSTALL/scripts/params.py   | 109 ---
 .../scripts/shared_initialization.py            | 140 ----
 .../hooks/before-ANY/files/changeToSecureUid.sh |  53 --
 .../HDP/3.0/hooks/before-ANY/scripts/hook.py    |  36 -
 .../HDP/3.0/hooks/before-ANY/scripts/params.py  | 259 ------
 .../before-ANY/scripts/shared_initialization.py | 239 ------
 .../3.0/hooks/before-INSTALL/scripts/hook.py    |  37 -
 .../3.0/hooks/before-INSTALL/scripts/params.py  | 115 ---
 .../scripts/repo_initialization.py              |  76 --
 .../scripts/shared_initialization.py            |  37 -
 .../3.0/hooks/before-RESTART/scripts/hook.py    |  29 -
 .../hooks/before-START/files/checkForFormat.sh  |  65 --
 .../before-START/files/fast-hdfs-resource.jar   | Bin 28296600 -> 0 bytes
 .../before-START/files/task-log4j.properties    | 134 ----
 .../hooks/before-START/files/topology_script.py |  66 --
 .../HDP/3.0/hooks/before-START/scripts/hook.py  |  40 -
 .../3.0/hooks/before-START/scripts/params.py    | 364 ---------
 .../before-START/scripts/rack_awareness.py      |  47 --
 .../scripts/shared_initialization.py            | 249 ------
 .../templates/commons-logging.properties.j2     |  43 -
 .../templates/exclude_hosts_list.j2             |  21 -
 .../templates/hadoop-metrics2.properties.j2     | 107 ---
 .../before-START/templates/health_check.j2      |  81 --
 .../templates/include_hosts_list.j2             |  21 -
 .../templates/topology_mappings.data.j2         |  24 -
 .../main/resources/stacks/HDP/3.0/kerberos.json |  79 --
 .../main/resources/stacks/HDP/3.0/widgets.json  |  95 ---
 .../src/main/resources/upgrade-pack.xsd         |   9 +
 ambari-server/src/main/resources/widgets.json   |  95 +++
 .../server/api/services/AmbariMetaInfoTest.java |  49 +-
 .../AmbariManagementControllerImplTest.java     |   6 +-
 .../AmbariManagementControllerTest.java         |  17 +
 .../internal/UpgradeResourceProviderTest.java   |  33 +-
 .../upgrade/StackVersionListenerTest.java       |   1 -
 .../apache/ambari/server/orm/OrmTestHelper.java |   2 +-
 .../FixCapacitySchedulerOrderingPolicyTest.java |   2 +-
 .../upgrades/FixOozieAdminUsersTest.java        |   2 +-
 .../upgrades/FixYarnWebServiceUrlTest.java      |   2 +-
 .../HBaseEnvMaxDirectMemorySizeActionTest.java  |   2 +-
 .../upgrades/HiveEnvClasspathActionTest.java    |   2 +-
 .../upgrades/HiveZKQuorumConfigActionTest.java  |   2 +-
 .../upgrades/KerberosKeytabsActionTest.java     |  17 +
 .../upgrades/RangerConfigCalculationTest.java   |   2 +-
 .../RangerKerberosConfigCalculationTest.java    |   2 +-
 .../upgrades/RangerKmsProxyConfigTest.java      |   2 +-
 .../RangerUsersyncConfigCalculationTest.java    |   2 +-
 .../RangerWebAlertConfigActionTest.java         |   6 +-
 .../SparkShufflePropertyConfigTest.java         |   2 +-
 .../UpgradeUserKerberosDescriptorTest.java      |   4 +-
 .../ambari/server/stack/StackManagerTest.java   |   8 -
 .../state/kerberos/KerberosDescriptorTest.java  |  56 ++
 .../KerberosDescriptorUpdateHelperTest.java     |   1 +
 .../kerberos/VariableReplacementHelperTest.java |  35 +-
 .../svccomphost/ServiceComponentHostTest.java   | 102 ++-
 .../topology/AsyncCallableServiceTest.java      |  89 +--
 .../ClusterDeployWithStartOnlyTest.java         |   6 +
 ...InstallWithoutStartOnComponentLevelTest.java |   6 +
 .../ClusterInstallWithoutStartTest.java         |   6 +
 .../topology/ConfigureClusterTaskTest.java      |  64 +-
 .../server/topology/TopologyManagerTest.java    |  14 +-
 .../server/upgrade/UpgradeCatalog260Test.java   |  14 +
 .../src/test/python/TestAmbariServer.py         |   4 +-
 ambari-server/src/test/python/TestMpacks.py     |  12 +-
 .../src/test/python/TestResourceFilesKeeper.py  |   1 +
 .../src/test/python/TestUpgradeSummary.py       |   6 +-
 .../AMBARI_METRICS/test_service_advisor.py      | 596 ++++++++++++++
 .../HIVE/test_jdbc_driver_config.py             |  18 +-
 .../RANGER/test_db_flavor_config.py             |  17 +-
 .../RANGER_KMS/test_db_flavor_config.py         |  17 +-
 .../SQOOP/test_jdbc_driver_config.py            |  16 +-
 .../configs/ranger_admin_default.json           |  55 ++
 .../custom_actions/TestInstallPackages.py       |  50 +-
 .../stacks/2.0.6/HBASE/test_hbase_client.py     |   1 -
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |   6 +-
 .../2.0.6/HBASE/test_phoenix_queryserver.py     |   7 +
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |  38 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |   5 +-
 .../stacks/2.0.6/HIVE/test_hive_metastore.py    |  42 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       | 158 ++--
 .../2.0.6/HIVE/test_hive_service_check.py       |   4 +-
 .../stacks/2.0.6/HIVE/test_webhcat_server.py    |  41 +-
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |   9 +
 .../2.0.6/OOZIE/test_oozie_service_check.py     |   5 +-
 .../stacks/2.0.6/YARN/test_historyserver.py     |   5 +-
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py |   1 +
 .../stacks/2.0.6/common/test_stack_advisor.py   | 576 --------------
 .../python/stacks/2.0.6/configs/default.json    |   1 +
 .../hooks/after-INSTALL/test_after_install.py   |  88 +--
 .../2.0.6/hooks/before-ANY/test_before_any.py   |  24 +-
 .../hooks/before-INSTALL/test_before_install.py |  14 +-
 .../hooks/before-START/test_before_start.py     |  21 +-
 .../stacks/2.1/FALCON/test_falcon_server.py     |  26 +-
 .../stacks/2.1/HIVE/test_hive_metastore.py      |  54 +-
 .../stacks/2.2/PIG/test_pig_service_check.py    |  13 +
 .../stacks/2.2/SPARK/test_job_history_server.py |  18 +-
 .../stacks/2.2/common/test_conf_select.py       |  13 +-
 .../stacks/2.2/common/test_stack_advisor.py     | 511 ------------
 .../2.2/common/test_stack_advisor_perf.py       |  66 +-
 .../stacks/2.3/MAHOUT/test_mahout_client.py     |   2 +-
 .../2.3/MAHOUT/test_mahout_service_check.py     |  28 +-
 .../2.3/SPARK/test_spark_thrift_server.py       |   8 +-
 .../stacks/2.3/common/test_stack_advisor.py     |   2 +-
 .../stacks/2.5/RANGER_KMS/test_kms_server.py    |  24 +-
 .../python/stacks/2.5/SPARK/test_spark_livy.py  |  16 +-
 .../stacks/2.5/ZEPPELIN/test_zeppelin_060.py    |  20 +-
 .../stacks/2.5/configs/ranger-kms-secured.json  |   6 +-
 .../test/python/stacks/2.6/DRUID/test_druid.py  |  22 +-
 .../stacks/2.6/SPARK2/test_spark_livy2.py       |  16 +-
 .../stacks/2.6/ZEPPELIN/test_zeppelin_070.py    | 236 +++---
 .../stacks/2.6/common/test_stack_advisor.py     | 190 ++++-
 .../src/test/python/stacks/utils/RMFTestCase.py |  43 +-
 ambari-server/src/test/resources/kerberos.json  |  42 +
 .../resources/stacks/HDP/2.0.8/kerberos.json    |  42 -
 ambari-server/src/test/resources/widgets.json   |  95 +++
 ambari-web/app/controllers/installer.js         |   8 +-
 .../journalNode/progress_controller.js          |   4 +-
 .../journalNode/step4_controller.js             |   6 +-
 .../nameNode/step5_controller.js                |   6 +-
 .../highAvailability/progress_controller.js     |   4 +-
 .../progress_popup_controller.js                |   2 +-
 .../main/admin/kerberos/step2_controller.js     |   7 +-
 .../main/admin/stack_and_upgrade_controller.js  |   4 +-
 ambari-web/app/controllers/main/host/details.js |   2 -
 .../controllers/main/service/info/summary.js    |   1 +
 .../main/service/reassign/step4_controller.js   |   2 -
 .../app/controllers/wizard/step6_controller.js  |  15 +-
 .../app/controllers/wizard/step8_controller.js  |  15 +-
 .../app/mappers/repository_version_mapper.js    |   2 +-
 ambari-web/app/messages.js                      |   8 +-
 .../app/mixins/common/configs/configs_saver.js  |  26 +-
 .../main/service/configs/config_overridable.js  |   1 -
 .../models/stack_version/repository_version.js  |   2 +-
 ambari-web/app/routes/add_kerberos_routes.js    |  22 +-
 ambari-web/app/styles/alerts.less               |  14 +-
 ambari-web/app/styles/application.less          |   7 +-
 ambari-web/app/styles/bootstrap_overrides.less  |   4 +
 ambari-web/app/styles/dashboard.less            |  22 +-
 ambari-web/app/styles/modal_popups.less         |   2 +-
 ambari-web/app/styles/stack_versions.less       |  13 +-
 .../app/styles/theme/bootstrap-ambari.css       |  17 +-
 ambari-web/app/styles/top-nav.less              |   5 +-
 ambari-web/app/styles/wizard.less               |   9 +
 ambari-web/app/templates/common/breadcrumbs.hbs |   4 +-
 .../stack_upgrade/stack_upgrade_wizard.hbs      |   2 +-
 .../main/service/info/service_alert_popup.hbs   |   6 +-
 .../app/templates/main/service/info/summary.hbs |  21 +-
 .../service/info/summary/master_components.hbs  |   4 -
 .../templates/main/service/services/hdfs.hbs    |  38 +-
 ambari-web/app/templates/wizard/step6.hbs       |   2 +-
 ambari-web/app/templates/wizard/step9.hbs       |  12 +-
 ambari-web/app/utils/ajax/ajax.js               |   2 -
 .../stack_upgrade/upgrade_version_box_view.js   |  70 +-
 .../upgrade_version_column_view.js              |   2 +-
 .../admin/stack_upgrade/upgrade_wizard_view.js  |  26 +-
 .../app/views/main/service/info/summary.js      |   8 +
 .../app/views/main/service/services/hdfs.js     |   2 -
 ambari-web/test/controllers/installer_test.js   |  37 +-
 .../journalNode/progress_controller_test.js     |   2 -
 .../progress_controller_test.js                 |   3 -
 .../progress_popup_controller_test.js           |   4 +-
 .../admin/kerberos/step2_controller_test.js     |   9 +-
 .../mixins/common/configs/configs_saver_test.js |  28 +-
 .../stack_version/repository_version_test.js    |  12 +-
 .../upgrade_version_box_view_test.js            | 149 ++--
 .../upgrade_version_column_view_test.js         |   6 +
 .../stack_upgrade/upgrade_wizard_view_test.js   |  38 +
 .../src/main/assemblies/hdf-ambari-mpack.xml    |   1 +
 .../HIVE/package/scripts/hive_client.py         |   2 -
 .../HIVE/package/scripts/hive_metastore.py      |   1 -
 .../HIVE/package/scripts/hive_server.py         |   2 +-
 .../package/scripts/hive_server_interactive.py  |   1 -
 .../HIVE/package/scripts/webhcat_server.py      |   2 -
 .../scripts/application_timeline_server.py      |   1 -
 .../YARN/package/scripts/historyserver.py       |   1 -
 .../YARN/package/scripts/mapreduce2_client.py   |   2 -
 .../YARN/package/scripts/nodemanager.py         |   1 -
 .../YARN/package/scripts/resourcemanager.py     |   1 -
 .../YARN/package/scripts/yarn_client.py         |   1 -
 .../ui/hive-web/app/adapters/application.js     |  18 +
 .../app/controllers/visualization-ui.js         |  10 +-
 .../ui/hive-web/app/utils/constants.js          |   1 +
 .../savedQueries/SavedQueryResourceManager.java |  17 +-
 .../resources/ui/app/components/job-item.js     |  49 +-
 .../resources/ui/app/components/jobs-browser.js |   3 +
 .../ui/app/components/query-result-table.js     |   2 +-
 .../src/main/resources/ui/app/routes/jobs.js    |  11 +
 .../main/resources/ui/app/routes/queries/new.js |   2 +
 .../resources/ui/app/routes/queries/query.js    | 240 ++++--
 .../src/main/resources/ui/app/services/jobs.js  |  31 +-
 .../src/main/resources/ui/app/styles/app.scss   |   8 +
 .../ui/app/templates/components/job-item.hbs    |   2 +-
 .../app/templates/components/jobs-browser.hbs   |   2 +-
 .../main/resources/ui/app/templates/jobs.hbs    |   1 +
 .../hive20/src/main/resources/ui/yarn.lock      |   2 +-
 .../apache/ambari/view/utils/hdfs/HdfsApi.java  |   2 +-
 614 files changed, 13386 insertions(+), 14066 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-agent/src/main/python/ambari_agent/CustomServiceOrchestrator.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/assemblies/server.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
index a5b3622,9f8a095..5cc5dd5
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/StatusCommand.java
@@@ -39,9 -39,9 +39,12 @@@ public class StatusCommand extends Agen
    @SerializedName("serviceName")
    private String serviceName;
  
 +  @SerializedName("serviceType")
 +  private String serviceType;
 +
+   @SerializedName("role")
+   private String role;
+ 
    @SerializedName("componentName")
    private String componentName;
  
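
For context, the two fields added above ride along in the JSON status command sent to agents. Below is a minimal sketch of the resulting wire shape, assuming plain Gson serialization; StatusCommandJsonSketch, StatusCommandish, and the field values are illustrative stand-ins, not the real class:

    import com.google.gson.Gson;

    public class StatusCommandJsonSketch {
      // Illustrative stand-in for the patched class; the field names already
      // match the wire names, so the @SerializedName annotations are omitted.
      static class StatusCommandish {
        String serviceName = "HDFS";       // hypothetical value
        String serviceType = "HDFS";       // field added by this merge
        String role = "DATANODE";          // field added by this merge
        String componentName = "DATANODE"; // hypothetical value
      }

      public static void main(String[] args) {
        // Prints:
        // {"serviceName":"HDFS","serviceType":"HDFS","role":"DATANODE","componentName":"DATANODE"}
        System.out.println(new Gson().toJson(new StatusCommandish()));
      }
    }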

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 8883ced,46ee65a..c07df43
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@@ -134,8 -136,9 +145,10 @@@ public class AmbariMetaInfo 
    private File commonServicesRoot;
    private File extensionsRoot;
    private File serverVersionFile;
+   private File commonWidgetsDescriptorFile;
    private File customActionRoot;
 +  private File mpacksV2Staging;
+   private String commonKerberosDescriptorFileLocation;
    private Map<String, VersionDefinitionXml> versionDefinitions = null;
  
  
@@@ -243,9 -228,8 +256,11 @@@
  
      customActionRoot = new File(conf.getCustomActionDefinitionPath());
  
 +    String mpacksV2StagingPath = conf.getMpacksV2StagingPath();
 +    mpacksV2Staging = new File(mpacksV2StagingPath);
 +
+     commonKerberosDescriptorFileLocation = new File(conf.getResourceDirPath(), KERBEROS_DESCRIPTOR_FILE_NAME).getAbsolutePath();
+     commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
    }
  
    /**
@@@ -1414,10 -1360,9 +1425,10 @@@
    /**
     * Ensures that the map of version definition files is populated
     */
-   private void ensureVersionDefinitions() {
+   private synchronized void ensureVersionDefinitions() {
      if (null != versionDefinitions) {
 -      return;
 +      if(versionDefinitions.size() > 0)
 +        return;
      }
  
      versionDefinitions = new HashMap<>();
@@@ -1519,6 -1446,9 +1530,10 @@@
      }
  
      return null;
 +
    }
+ 
+   public File getCommonWidgetsDescriptorFile() {
+     return commonWidgetsDescriptorFile;
+   }
  }
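
The hunk above also tightens ensureVersionDefinitions: the method is now synchronized, and an empty map no longer counts as populated, so a failed first pass is retried on the next call. A minimal standalone sketch of that guard pattern (an assumed simplification, not the real AmbariMetaInfo):

    import java.util.HashMap;
    import java.util.Map;

    class VersionDefinitionCacheSketch {
      private Map<String, Object> versionDefinitions = null;

      synchronized void ensureVersionDefinitions() {
        // A previous attempt may have left an empty map behind; only a
        // non-empty cache is treated as populated, so an empty one is rebuilt.
        if (versionDefinitions != null && !versionDefinitions.isEmpty()) {
          return;
        }
        versionDefinitions = new HashMap<>();
        // ... populate from version definition files (elided) ...
      }
    }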

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
index c56483d,dc97871..a4339ab
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
@@@ -71,9 -72,9 +72,10 @@@ import org.apache.ambari.server.control
  import org.apache.ambari.server.controller.internal.KerberosDescriptorResourceProvider;
  import org.apache.ambari.server.controller.internal.MemberResourceProvider;
  import org.apache.ambari.server.controller.internal.RepositoryVersionResourceProvider;
 +import org.apache.ambari.server.controller.internal.ServiceGroupResourceProvider;
  import org.apache.ambari.server.controller.internal.ServiceResourceProvider;
  import org.apache.ambari.server.controller.internal.UpgradeResourceProvider;
+ import org.apache.ambari.server.controller.internal.ViewInstanceResourceProvider;
  import org.apache.ambari.server.controller.logging.LoggingRequestHelperFactory;
  import org.apache.ambari.server.controller.logging.LoggingRequestHelperFactoryImpl;
  import org.apache.ambari.server.controller.metrics.MetricPropertyProviderFactory;

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/ResourceProviderFactory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractControllerResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClientConfigResourceProvider.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/events/listeners/upgrade/HostVersionOutOfSyncListener.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackGenerator.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackGenerator.java
index 5d4b832,0000000..130ff62
mode 100644,000000..100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackGenerator.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/mpack/MpackGenerator.java
@@@ -1,561 -1,0 +1,544 @@@
 +/**
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + * <p>
 + * http://www.apache.org/licenses/LICENSE-2.0
 + * <p>
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.ambari.server.mpack;
 +
 +import java.io.BufferedOutputStream;
 +import java.io.BufferedWriter;
 +import java.io.File;
 +import java.io.FileInputStream;
 +import java.io.FileNotFoundException;
 +import java.io.FileOutputStream;
 +import java.io.FileWriter;
 +import java.io.IOException;
 +import java.net.URL;
 +import java.nio.file.Files;
 +import java.nio.file.Path;
 +import java.nio.file.Paths;
 +import java.nio.file.StandardCopyOption;
 +import java.util.ArrayList;
 +import java.util.Collections;
 +import java.util.HashMap;
 +import java.util.List;
 +import java.util.Map;
 +
 +import javax.xml.bind.JAXBContext;
 +import javax.xml.bind.Marshaller;
 +
 +import org.apache.ambari.server.stack.ServiceDirectory;
 +import org.apache.ambari.server.stack.StackDirectory;
 +import org.apache.ambari.server.stack.StackManager;
 +import org.apache.ambari.server.state.Mpack;
 +import org.apache.ambari.server.state.Packlet;
 +import org.apache.ambari.server.state.PropertyInfo;
 +import org.apache.ambari.server.state.QuickLinksConfigurationInfo;
 +import org.apache.ambari.server.state.ServiceInfo;
 +import org.apache.ambari.server.state.StackId;
 +import org.apache.ambari.server.state.StackInfo;
 +import org.apache.ambari.server.state.ThemeInfo;
 +import org.apache.ambari.server.state.quicklinks.QuickLinks;
 +import org.apache.ambari.server.state.repository.ManifestServiceInfo;
 +import org.apache.ambari.server.state.repository.VersionDefinitionXml;
 +import org.apache.ambari.server.state.stack.ConfigurationXml;
 +import org.apache.ambari.server.state.stack.RepositoryXml;
 +import org.apache.ambari.server.state.stack.ServiceMetainfoXml;
 +import org.apache.ambari.server.state.stack.StackMetainfoXml;
 +import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
 +import org.apache.ambari.server.state.theme.Theme;
 +import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
 +import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
 +import org.apache.commons.compress.utils.IOUtils;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.lang.StringUtils;
 +import org.codehaus.jackson.map.ObjectMapper;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
 +
 +import com.google.gson.Gson;
 +import com.google.gson.GsonBuilder;
 +
 +/**
 + * Generate management pack from stack definition
 + */
 +public class MpackGenerator {
 +  private static final Logger LOG = LoggerFactory.getLogger
 +    (MpackGenerator.class);
 +
 +  private static final ObjectMapper mapper = new ObjectMapper();
 +  private File commonServicesRoot;
 +  private File stackRoot;
 +  private File mpacksRoot;
 +  private StackId srcStackId;
 +  private StackId dstStackId;
 +  private StackManager stackManager;
 +  private VersionDefinitionXml vdf;
 +  private Gson gson = new GsonBuilder().setPrettyPrinting().create();
 +
 +  /**
 +   * {@link MpackGenerator} constructor
 +   * @param stackRoot           Stack root directory
 +   * @param commonServicesRoot  Common services root directory
 +   * @param srcStackId          Source stack id
 +   * @param vdfUrl              Version Definition File
 +   * @param mpacksRoot          Management pack root directory
 +   * @throws Exception
 +   */
 +  public MpackGenerator(File stackRoot, File commonServicesRoot, StackId srcStackId, URL vdfUrl, File mpacksRoot)
 +    throws Exception {
 +    this.commonServicesRoot = commonServicesRoot;
 +    this.stackRoot = stackRoot;
 +    this.mpacksRoot = mpacksRoot;
 +    this.srcStackId = srcStackId;
 +    this.stackManager = new StackManager(stackRoot, commonServicesRoot, false);
 +    this.vdf = VersionDefinitionXml.load(vdfUrl);
 +    this.dstStackId = new StackId(vdf.release.stackId);
 +  }
 +
 +  /**
 +   * Generate management pack
 +   * @throws Exception
 +   */
 +  public void generateMpack() throws Exception {
 +    String dstStackName = dstStackId.getStackName();
 +    String version = vdf.release.version;
 +    String build = vdf.release.build;
 +
 +    System.out.println("===========================================================");
 +    System.out.println("Source Stack Id: " + srcStackId);
 +    System.out.println("Destination Stack Id: " + dstStackId);
 +    System.out.println("===========================================================");
 +
 +    String mpackName = dstStackName.toLowerCase() + "-ambari-mpack" + "-" + version + "-" + build;
 +    File mpackRootDir = new File(mpacksRoot.getAbsolutePath() + File.separator + mpackName);
 +    if (!mpacksRoot.exists()) {
 +      mpacksRoot.mkdirs();
 +    }
 +    if (mpackRootDir.exists()) {
 +      FileUtils.deleteDirectory(mpackRootDir);
 +    }
 +    mpackRootDir.mkdir();
 +
 +    File mpackPackletsDir = new File(mpackRootDir.getAbsolutePath() + File.separator + "packlets");
 +    if (mpackPackletsDir.exists()) {
 +      mpackPackletsDir.delete();
 +    }
 +    mpackPackletsDir.mkdir();
 +
 +    StackInfo srcStackInfo = stackManager.getStack(srcStackId.getStackName(), srcStackId.getStackVersion());
 +    StackRoleCommandOrder stackRoleCommandOrder = srcStackInfo.getRoleCommandOrder();
 +    FileWriter stackRCOFile = new FileWriter(
 +      mpackRootDir.getAbsolutePath() + File.separator + StackDirectory.RCO_FILE_NAME);
 +    mapper.writerWithDefaultPrettyPrinter().writeValue(stackRCOFile, stackRoleCommandOrder.getContent());
 +
 +    // Export stack configs
 +    File stackConfigDir = new File(
 +      mpackRootDir.getAbsolutePath() + File.separator + StackDirectory.SERVICE_CONFIG_FOLDER_NAME);
 +    exportConfigs(srcStackInfo.getProperties(), stackConfigDir);
 +
-     // Export widgets.json
-     exportFile(new File(srcStackInfo.getWidgetsDescriptorFileLocation()), mpackRootDir);
- 
-     // Export kerberos.json
-     exportFile(new File(srcStackInfo.getKerberosDescriptorFileLocation()), mpackRootDir);
- 
 +    // Export repoinfo.xml
 +    RepositoryXml repositoryXml =  srcStackInfo.getRepositoryXml();
 +    JAXBContext ctx = JAXBContext.newInstance(RepositoryXml.class);
 +    Marshaller marshaller = ctx.createMarshaller();
 +    marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
 +    File reposDir = new File(mpackRootDir.getAbsolutePath() + File.separator + "repos");
 +    if (!reposDir.exists()) {
 +      reposDir.mkdir();
 +    }
 +    FileOutputStream repoXmlFileStream = new FileOutputStream(
 +      reposDir.getAbsolutePath() + File.separator + "repoinfo.xml");
 +    marshaller.marshal(repositoryXml, repoXmlFileStream);
 +    repoXmlFileStream.flush();
 +    repoXmlFileStream.close();
 +
 +    // Copy JSON cluster property files (example: stacks/HDP/2.0.6/properties/stack_tools.json)
 +    File destPropertiesDir = new File(mpackRootDir.getAbsoluteFile() + File.separator + "properties");
 +    if(!destPropertiesDir.exists()) {
 +      destPropertiesDir.mkdir();
 +    }
 +    String srcStackName = srcStackId.getStackName();
 +    String currentStackVersion = srcStackId.getStackVersion();
 +
 +    while (!StringUtils.isEmpty(currentStackVersion)) {
 +      StackInfo currentStackInfo = stackManager.getStack(srcStackName, currentStackVersion);
 +      File srcPropertiesDir = new File(stackRoot.getAbsolutePath() + File.separator + srcStackName + File.separator + currentStackVersion + File.separator + "properties");
 +      if (srcPropertiesDir.exists() && srcPropertiesDir.isDirectory()) {
 +        for (File srcPropertiesFile : srcPropertiesDir.listFiles()) {
 +          File destPropertiesFile = new File(destPropertiesDir.getAbsolutePath() + File.separator + srcPropertiesFile.getName());
 +          if (!destPropertiesFile.exists()) {
 +            FileUtils.copyFile(srcPropertiesFile, destPropertiesFile);
 +          }
 +        }
 +      }
 +      currentStackVersion = currentStackInfo.getParentStackVersion();
 +    }
 +
-     // Copy stack hooks folder
-     String srcStackHooksFolder = srcStackInfo.getStackHooksFolder();
-     srcStackHooksFolder = stackRoot.getAbsolutePath() + File.separator + srcStackHooksFolder;
-     File srcStackHooksFile = new File(srcStackHooksFolder);
-     if (srcStackHooksFile != null && srcStackHooksFile.exists()) {
-       File destStackHooksFile = new File(
-         mpackRootDir.getAbsolutePath() + File.separator
-           + srcStackHooksFile.getName());
-       FileUtils.copyDirectory(srcStackHooksFile, destStackHooksFile);
-     }
- 
 +    // Export stack metainfo.xml
 +    String parentStackVersion = srcStackInfo.getParentStackVersion();
 +    StackMetainfoXml stackMetainfoXml = new StackMetainfoXml();
 +    stackMetainfoXml.setMinJdk(srcStackInfo.getMinJdk());
 +    stackMetainfoXml.setMaxJdk(srcStackInfo.getMaxJdk());
 +    StackMetainfoXml.Version ver = new StackMetainfoXml.Version();
 +    ver.setActive(srcStackInfo.isActive());
 +    stackMetainfoXml.setVersion(ver);
 +    ctx = JAXBContext.newInstance(StackMetainfoXml.class);
 +    marshaller = ctx.createMarshaller();
 +    marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
 +    FileOutputStream stackMetainfoFileStream = new FileOutputStream(
 +      mpackRootDir.getAbsolutePath() + File.separator + "metainfo.xml");
 +    marshaller.marshal(stackMetainfoXml, stackMetainfoFileStream);
 +    stackMetainfoFileStream.flush();
 +    stackMetainfoFileStream.close();
 +
 +    // Export stack advisors
 +    File stackAdvisorsDir = new File(mpackRootDir.getAbsolutePath() + File.separator + "stack-advisor");
 +    if(!stackAdvisorsDir.exists()) {
 +      stackAdvisorsDir.mkdir();
 +    }
 +
 +    currentStackVersion = srcStackId.getStackVersion();
 +    String baseStackAdvisor = null;
 +    String baseStackAdvisorModule = null;
 +    while (!StringUtils.isEmpty(currentStackVersion)) {
 +      // Copy all inherited stack advisors from source stack to "stack-advisor" folder
 +      StackInfo currentStackInfo = stackManager.getStack(srcStackName, currentStackVersion);
 +      File srcStackAdvisor = new File(stackRoot.getAbsolutePath() + File.separator + srcStackName + File.separator + currentStackVersion + File.separator + "services" + File.separator + "stack_advisor.py");
 +      if(srcStackAdvisor.exists()) {
 +        if(baseStackAdvisor == null) {
 +          baseStackAdvisor = srcStackName.toUpperCase() + currentStackVersion.replace(".", "") + "StackAdvisor";
 +          baseStackAdvisorModule = "stack_advisor_" + srcStackName.toLowerCase() + currentStackVersion.replace(".", "");
 +        }
 +        File dstStackAdvisor = new File(
 +          stackAdvisorsDir.getAbsolutePath() + File.separator + "stack_advisor_" + srcStackName.toLowerCase()
 +            + currentStackVersion.replace(".", "") + ".py");
 +        FileUtils.copyFile(srcStackAdvisor, dstStackAdvisor);
 +      }
 +      currentStackVersion = currentStackInfo.getParentStackVersion();
 +    }
 +    if(baseStackAdvisor != null) {
 +      File mpackServicesDir = new File(mpackRootDir.getAbsolutePath() + File.separator + "services");
 +      if (!mpackServicesDir.exists()) {
 +        mpackServicesDir.mkdir();
 +      }
 +      String mpackStackAdvisorName = dstStackId.getStackName().toUpperCase() + dstStackId.getStackVersion().replace(".", "") + "StackAdvisor";
 +      if(baseStackAdvisor.equalsIgnoreCase(mpackStackAdvisorName)) {
 +        // Use top level stack advisor from source stack as mpack stack advisor
 +        String srcPath = stackAdvisorsDir.getAbsolutePath() + File.separator + baseStackAdvisorModule + ".py";
 +        String dstPath = mpackServicesDir.getAbsolutePath() + File.separator + "stack_advisor.py";
 +        Files.move(Paths.get(srcPath), Paths.get(dstPath));
 +      } else {
 +        // Create mpack stack advisor that inherits from top level stack advisor from source stack
 +        FileWriter fileWriter = new FileWriter(
 +          mpackServicesDir.getAbsolutePath() + File.separator + "stack_advisor.py");
 +        BufferedWriter bw = new BufferedWriter(fileWriter);
 +        bw.write("from " + baseStackAdvisorModule + " import *");
 +        bw.newLine();
 +        bw.write("class " + mpackStackAdvisorName + "(" + baseStackAdvisor + ")");
 +        bw.newLine();
 +        bw.write("  pass");
 +        bw.newLine();
 +        bw.flush();
 +        fileWriter.flush();
 +        bw.close();
 +        fileWriter.close();
 +      }
 +    }
 +
 +    Mpack mpack = new Mpack();
 +    mpack.setName(dstStackName);
 +    mpack.setVersion(vdf.release.version);
 +    mpack.setBuildNumber(vdf.release.getFullVersion());
 +    mpack.setStackId(dstStackId.getStackId());
 +    mpack.setDescription(dstStackName + " Ambari Management Pack");
 +    Map<String, String> prereqs = new HashMap<>();
 +    prereqs.put("min-ambari-version", "3.0.0.0");
 +    mpack.setPrerequisites(prereqs);
 +    List<Packlet> packlets = new ArrayList<>();
 +    mpack.setPacklets(packlets);
 +
 +    for (ManifestServiceInfo manifestServiceInfo : vdf.getStackServices(srcStackInfo, true /* skipMissingServices = true */)) {
 +      ServiceInfo serviceInfo = srcStackInfo.getService(manifestServiceInfo.getName());
 +      String serviceName = manifestServiceInfo.getName();
 +      String serviceVersion = (String) manifestServiceInfo.getVersions().toArray()[0];
 +      ServiceInfo clonedServiceInfo =  (ServiceInfo) serviceInfo.clone();
 +      clonedServiceInfo.setVersion(serviceVersion);
 +      clonedServiceInfo.setParent(null);
 +      if (serviceInfo.getMetricsFile() != null) {
 +        clonedServiceInfo.setMetricsFileName(serviceInfo.getMetricsFile().getName());
 +      }
 +      if( serviceInfo.getWidgetsDescriptorFile() != null) {
 +        clonedServiceInfo.setWidgetsFileName(serviceInfo.getWidgetsDescriptorFile().getName());
 +      }
 +
 +      System.out.println("Processing service=" + serviceInfo.getName() + ", version=" + serviceVersion);
 +      System.out.println("Service Parent : " + serviceInfo.getParent());
 +      String packletDirName = serviceName + "-packlet-" + serviceVersion;
 +      String packletTarName = packletDirName + ".tar.gz";
 +      File packletDir = new File(
 +        mpackPackletsDir.getAbsolutePath() + File.separator + packletDirName);
 +      if (!packletDir.exists()) {
 +        packletDir.mkdir();
 +      }
 +      Packlet packlet = new Packlet();
 +      packlet.setType(Packlet.PackletType.SERVICE_PACKLET);
 +      packlet.setName(serviceName);
 +      packlet.setVersion(serviceVersion);
 +      packlet.setServiceId(serviceVersion);
 +      packlet.setSourceLocation("packlets" + File.separator + packletTarName);
 +      packlets.add(packlet);
 +
 +      // Export service metainfo.xml
 +      ServiceMetainfoXml serviceMetainfoXml = new ServiceMetainfoXml();
 +      serviceMetainfoXml.setSchemaVersion(clonedServiceInfo.getSchemaVersion());
 +      List<ServiceInfo> serviceInfos = Collections.singletonList(clonedServiceInfo);
 +      serviceMetainfoXml.setServices(serviceInfos);
 +      ctx = JAXBContext.newInstance(ServiceMetainfoXml.class);
 +      marshaller = ctx.createMarshaller();
 +      marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
 +      FileOutputStream serviceMetainfoFileStream = new FileOutputStream(
 +        packletDir.getAbsolutePath() + File.separator + "metainfo.xml");
 +      marshaller.marshal(serviceMetainfoXml, serviceMetainfoFileStream);
 +      serviceMetainfoFileStream.flush();
 +      serviceMetainfoFileStream.close();
 +
 +      // Export metrics.json
 +      File srcMetricsFile = serviceInfo.getMetricsFile();
 +      exportFile(srcMetricsFile, packletDir);
 +
 +      // Export widgets.json
 +      File srcWidgetsFile = serviceInfo.getWidgetsDescriptorFile();
 +      exportFile(srcWidgetsFile, packletDir);
 +
 +      // Export alerts.json
 +      File srcAlertsFile = serviceInfo.getAlertsFile();
 +      exportFile(srcAlertsFile, packletDir);
 +
 +      // Export kerberos.json
 +      File srcKerberosFile = serviceInfo.getKerberosDescriptorFile();
 +      exportFile(srcKerberosFile, packletDir);
 +
 +      // Export quicklinks
 +      for (Map.Entry<String, QuickLinksConfigurationInfo> entry : serviceInfo.getQuickLinksConfigurationsMap()
 +        .entrySet()) {
 +        QuickLinksConfigurationInfo quickLinksConfigurationInfo = entry.getValue();
 +        String quickLinksFileName = quickLinksConfigurationInfo.getFileName();
 +        for (Map.Entry<String, QuickLinks> quickLinksEntry : quickLinksConfigurationInfo
 +          .getQuickLinksConfigurationMap().entrySet()) {
 +          File quickLinksDir = new File(
 +            packletDir.getAbsolutePath() + File.separator + serviceInfo
 +              .getQuickLinksConfigurationsDir());
 +          if (!quickLinksDir.exists()) {
 +            quickLinksDir.mkdir();
 +          }
 +          FileWriter quickLinksFileWriter = new FileWriter(
 +            quickLinksDir.getAbsolutePath() + File.separator + quickLinksFileName, true);
 +          mapper.writerWithDefaultPrettyPrinter()
 +            .writeValue(quickLinksFileWriter, quickLinksEntry.getValue());
 +        }
 +      }
 +
 +      // Export themes
 +      for (Map.Entry<String, ThemeInfo> entry : serviceInfo.getThemesMap().entrySet()) {
 +        ThemeInfo themeInfo = entry.getValue();
 +        String themeFileName = themeInfo.getFileName();
 +        for (Map.Entry<String, Theme> themeEntry : themeInfo.getThemeMap().entrySet()) {
 +          File themesDir = new File(
 +            packletDir.getAbsolutePath() + File.separator + serviceInfo.getThemesDir());
 +          if (!themesDir.exists()) {
 +            themesDir.mkdir();
 +          }
 +          FileWriter themesFileWriter = new FileWriter(
 +            themesDir.getAbsolutePath() + File.separator + themeFileName, true);
 +          mapper.writerWithDefaultPrettyPrinter().writeValue(themesFileWriter, themeEntry.getValue());
 +        }
 +      }
 +
 +      // Export package folder
 +      String srcPackageFolder = serviceInfo.getServicePackageFolder();
 +      if (srcPackageFolder.startsWith("common-services")) {
 +        srcPackageFolder = srcPackageFolder
 +          .replace("common-services", commonServicesRoot.getAbsolutePath());
 +      } else {
 +        srcPackageFolder = srcPackageFolder.replace("stacks", stackRoot.getAbsolutePath());
 +      }
 +      File srcPackageFile = new File(srcPackageFolder);
 +      if (srcPackageFile != null && srcPackageFile.exists()) {
 +        File destPackageFile = new File(
 +          packletDir.getAbsolutePath() + File.separator
 +            + ServiceDirectory.PACKAGE_FOLDER_NAME);
 +        FileUtils.copyDirectory(srcPackageFile, destPackageFile);
 +      }
 +
 +      // Export merged configs
 +      File configDir = new File(
 +        packletDir.getAbsolutePath() + File.separator + serviceInfo.getConfigDir());
 +      exportConfigs(serviceInfo.getProperties(), configDir);
 +
 +      // Copy service advisor
 +      File srcServiceAdvisor = serviceInfo.getAdvisorFile();
 +      File destServiceAdvisor = new File(packletDir.getAbsolutePath() + File.separator + "service_advisor.py");
 +      if(srcServiceAdvisor != null && srcServiceAdvisor.exists()) {
 +        FileUtils.copyFile(srcServiceAdvisor, destServiceAdvisor);
 +      }
 +
 +      // TODO: Export upgrade packs
 +
 +      // Create packlet tarball
 +      createTarGzip(packletDir.getAbsolutePath());
 +      if(packletDir.exists()) {
 +        FileUtils.deleteDirectory(packletDir);
 +      }
 +    }
 +
 +    // Create mpack.json
 +    String mpackFilePath = mpackRootDir.getAbsolutePath() + File.separator + "mpack.json";
 +    FileWriter mpackFileWriter = new FileWriter(mpackFilePath);
 +    gson.toJson(mpack, Mpack.class, mpackFileWriter);
 +    mpackFileWriter.flush();
 +    mpackFileWriter.close();
 +
 +    // Create mpack tarball
 +    createTarGzip(mpackRootDir.getAbsolutePath());
 +    if(mpackRootDir.exists()) {
 +      FileUtils.deleteDirectory(mpackRootDir);
 +    }
 +  }
 +
 +  public static void exportFile(File srcFile, File destRootDir) throws Exception {
 +    if (srcFile != null && srcFile.exists()) {
 +      Path srcPath = Paths.get(srcFile.getAbsolutePath());
 +      Path destPath = Paths.get(
 +        destRootDir.getAbsolutePath() + File.separator + srcFile.getName());
 +      Files.copy(srcPath, destPath, StandardCopyOption.COPY_ATTRIBUTES,
 +        StandardCopyOption.REPLACE_EXISTING);
 +    }
 +  }
 +
 +  public void createTarGzip(String path) throws FileNotFoundException, IOException {
 +    File file = new File(path);
 +    if(!file.exists()) {
 +      throw new FileNotFoundException(path + " does not exist");
 +    }
 +    String parentDirName = file.getParent();
 +    String tarGzPath = parentDirName + File.separator + file.getName() + ".tar.gz";
 +    // Delete existing tarball
 +    File f = new File(tarGzPath);
 +    if(f.exists()) {
 +      f.delete();
 +    }
 +    FileOutputStream fOut = null;
 +    BufferedOutputStream bOut = null;
 +    GzipCompressorOutputStream gzOut = null;
 +    TarArchiveOutputStream tOut = null;
 +    try {
 +      fOut = new FileOutputStream(new File(tarGzPath));
 +      bOut = new BufferedOutputStream(fOut);
 +      gzOut = new GzipCompressorOutputStream(bOut);
 +      tOut = new TarArchiveOutputStream(gzOut);
 +      addFileToTarGz(tOut, path, "");
 +      System.out.println("Compressed " + path + " -> " + tarGzPath);
 +    } finally {
 +      if(tOut != null) {
 +        tOut.finish();
 +        tOut.close();
 +      }
 +      if(gzOut != null) {
 +        gzOut.close();
 +      }
 +      if(bOut != null) {
 +        bOut.close();
 +      }
 +      if(fOut != null) {
 +        fOut.close();
 +      }
 +    }
 +
 +  }
 +
 +  private void addFileToTarGz(TarArchiveOutputStream tOut, String path, String base)
 +    throws IOException
 +  {
 +    File f = new File(path);
 +    String entryName = base + f.getName();
 +    TarArchiveEntry tarEntry = new TarArchiveEntry(f, entryName);
 +    tOut.putArchiveEntry(tarEntry);
 +
 +    if (f.isFile()) {
 +      IOUtils.copy(new FileInputStream(f), tOut);
 +      tOut.closeArchiveEntry();
 +    } else {
 +      tOut.closeArchiveEntry();
 +      File[] children = f.listFiles();
 +      if (children != null) {
 +        for (File child : children) {
 +          addFileToTarGz(tOut, child.getAbsolutePath(), entryName + "/");
 +        }
 +      }
 +    }
 +  }
 +
 +  public static void exportConfigs(List<PropertyInfo> properties, File configDir) throws Exception {
 +    if (!configDir.exists()) {
 +      configDir.mkdir();
 +    }
 +
 +    Map<String, List<PropertyInfo>> configFilesMap = new HashMap<>();
 +    for (PropertyInfo propertyInfo : properties) {
 +      String fileName = propertyInfo.getFilename();
 +      if (!configFilesMap.containsKey(fileName)) {
 +        configFilesMap.put(fileName, new ArrayList<PropertyInfo>());
 +      }
 +      configFilesMap.get(fileName).add(propertyInfo);
 +    }
 +
 +    for (Map.Entry<String, List<PropertyInfo>> entry : configFilesMap.entrySet()) {
 +      String fileName = entry.getKey();
 +      ConfigurationXml configXml = new ConfigurationXml();
 +      configXml.setProperties(entry.getValue());
 +      JAXBContext ctx = JAXBContext.newInstance(ConfigurationXml.class);
 +      Marshaller marshaller = ctx.createMarshaller();
 +      marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
 +      FileOutputStream configFileStream = new FileOutputStream(
 +        configDir.getAbsolutePath() + File.separator + fileName);
 +      marshaller.marshal(configXml, configFileStream);
 +      configFileStream.flush();
 +      configFileStream.close();
 +    }
 +  }
 +
 +  /**
 +   * Main method for generating mpack
 +   * @param args
 +   * @throws Exception
 +   */
 +  public static void main(String[] args) throws Exception {
 +    System.out.println("Mpack Generator Started");
 +    String stackDir = args[0];
 +    String commonServicesDir = args[1];
 +    String srcStack = args[2];
 +    String vdfPath = args[3];
 +    String mpacksDir = args[4];
 +    MpackGenerator mpackGenerator = new MpackGenerator(
 +      new File(stackDir),
 +      new File(commonServicesDir),
 +      new StackId(srcStack),
 +      new URL(vdfPath),
 +      new File(mpacksDir));
 +    mpackGenerator.generateMpack();
 +    System.out.println("Mpack Generator Finished");
 +  }
 +}
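
For reference, a hypothetical programmatic invocation of the generator added above. Every path and the VDF URL are illustrative, not taken from the patch; they mirror the positional arguments consumed by MpackGenerator.main (stackDir, commonServicesDir, srcStack, vdfPath, mpacksDir):

    import java.io.File;
    import java.net.URL;

    import org.apache.ambari.server.mpack.MpackGenerator;
    import org.apache.ambari.server.state.StackId;

    public class GenerateMpackSketch {
      public static void main(String[] args) throws Exception {
        MpackGenerator generator = new MpackGenerator(
            new File("/var/lib/ambari-server/resources/stacks"),          // stack root (illustrative)
            new File("/var/lib/ambari-server/resources/common-services"), // common services root (illustrative)
            new StackId("HDP-2.6"),                                       // source stack id (illustrative)
            new URL("file:///tmp/HDP-2.6.3.0.xml"),                       // VDF location (illustrative)
            new File("/tmp/mpacks"));                                     // mpacks output root (illustrative)
        // Emits <stack>-ambari-mpack-<version>-<build>.tar.gz under /tmp/mpacks.
        generator.generateMpack();
      }
    }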

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/stack/ServiceDirectory.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
index da23ebc,eb6737a..9b740d0
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
@@@ -205,91 -193,6 +210,85 @@@ public class StackManager 
      populateDB(stackDao, extensionDao);
    }
  
 +  /**
 +   * Constructor. Initializes the StackManager for merging service definitions and creating management packs.
 +   * @param stackRoot           Stack root directory
 +   * @param commonServicesRoot  Common services root directory
 +   * @param validate            Whether to validate the stack and common services directories
 +   */
 +  public StackManager(File stackRoot, File commonServicesRoot, boolean validate) throws AmbariException{
 +    LOG.info("Initializing the stack manager...");
 +
 +    if (validate) {
 +      validateStackDirectory(stackRoot);
 +      validateCommonServicesDirectory(commonServicesRoot);
 +    }
 +
 +    stackMap = new HashMap<>();
 +
 +    parseDirectories(stackRoot, commonServicesRoot, null);
 +
 +    fullyResolveCommonServices(stackModules, commonServiceModules, extensionModules);
 +    fullyResolveExtensions(stackModules, commonServiceModules, extensionModules);
 +    fullyResolveStacks(stackModules, commonServiceModules, extensionModules);
 +  }
 +
 +  protected void updateArchives(
 +    File resourcesRoot, File stackRoot, Map<String, StackModule> stackModules, Map<String, ServiceModule> commonServiceModules,
 +    Map<String, ExtensionModule> extensionModules ) throws AmbariException {
 +
 +    LOG.info("Refreshing archives ...");
 +
 +    LOG.debug("Refreshing archives for stacks");
 +    for (StackModule stackModule : stackModules.values()) {
 +      LOG.debug("Refreshing archives for stack : " + stackModule.getId());
-       String hooksDir = stackModule.getStackDirectory().getHooksDir();
-       if(hooksDir != null) {
-         LOG.debug("Refreshing archive for stack hooks directory : " + hooksDir);
-         String hooksAbsolutePath = stackRoot.getAbsolutePath() + File.separator + hooksDir;
-         ResourceFilesKeeperHelper.updateDirectoryArchive(hooksAbsolutePath, false);
-       }
 +      for(ServiceModule serviceModule : stackModule.getServiceModules().values()) {
 +        String packageDir = serviceModule.getServiceDirectory().getPackageDir();
 +        if(packageDir != null) {
 +          LOG.debug("Refreshing archive for stack service package directory : " + packageDir);
 +          String packageAbsoluteDir = resourcesRoot.getAbsolutePath() + File.separator + packageDir;
 +          ResourceFilesKeeperHelper.updateDirectoryArchive(packageAbsoluteDir, false);
 +        }
 +      }
 +    }
 +
 +    LOG.debug("Refreshing archives for common services");
 +    for(ServiceModule serviceModule : commonServiceModules.values()) {
 +      String packageDir = serviceModule.getServiceDirectory().getPackageDir();
 +      if(packageDir != null) {
 +        LOG.debug("Refreshing archive for common service package directory : " + packageDir);
 +        String packageAbsoluteDir = resourcesRoot.getAbsolutePath() + File.separator + packageDir;
 +        ResourceFilesKeeperHelper.updateDirectoryArchive(packageAbsoluteDir, false);
 +      }
 +    }
 +
 +    LOG.debug("Refreshing archives for extensions");
 +    for(ExtensionModule extensionModule : extensionModules.values()) {
 +      LOG.debug("Refreshing archives for extension module" + extensionModule.getId());
 +      for(ServiceModule serviceModule : extensionModule.getServiceModules().values()) {
 +        String packageDir = serviceModule.getServiceDirectory().getPackageDir();
 +        if(packageDir != null) {
 +          LOG.debug("Refreshing archive for extension service package directory : " + packageDir);
 +          String packageAbsoluteDir = resourcesRoot.getAbsolutePath() + File.separator + packageDir;
 +          ResourceFilesKeeperHelper.updateDirectoryArchive(packageAbsoluteDir, false);
 +        }
 +      }
 +    }
 +
 +    List<String> miscDirs = new ArrayList<String>() {{
 +      add(CUSTOM_ACTIONS_DIR);
 +      add(HOST_SCRIPTS_DIR);
 +      add(DASHBOARDS_DIR);
 +    }};
 +
 +    LOG.debug("Refreshing archives for misc directories");
 +    for(String miscDir : miscDirs) {
 +      LOG.debug("Refreshing archive for misc directory : " + miscDir);
 +      String miscAbsolutePath = resourcesRoot.getAbsolutePath() + File.separator + miscDir;
 +      ResourceFilesKeeperHelper.updateDirectoryArchive(miscAbsolutePath, false);
 +    }
 +    LOG.info("Refreshing archives finished!");
 +  }
 +
    protected void parseDirectories(File stackRoot, File commonServicesRoot, File extensionRoot) throws AmbariException {
      commonServiceModules = parseCommonServicesDirectory(commonServicesRoot);
      stackModules = parseStackDirectory(stackRoot);

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceInfo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
----------------------------------------------------------------------
diff --cc ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
index 56d352e,4af83ef..13ceeb9
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
@@@ -64,7 -67,8 +67,9 @@@ import org.apache.ambari.server.state.H
  import org.apache.ambari.server.state.HostHealthStatus.HealthStatus;
  import org.apache.ambari.server.state.HostState;
  import org.apache.ambari.server.state.MaintenanceState;
++import org.apache.ambari.server.state.Service;
  import org.apache.ambari.server.state.StackId;
+ import org.apache.ambari.server.state.UpgradeState;
  import org.apache.ambari.server.state.configgroup.ConfigGroup;
  import org.apache.ambari.server.state.fsm.InvalidStateTransitionException;
  import org.apache.ambari.server.state.fsm.SingleArcTransition;
@@@ -1168,6 -1172,45 +1173,48 @@@ public class HostImpl implements Host 
  
      return false;
    }
+ 
+   /**
+    * {@inheritDoc}
+    */
+   @Override
+   public boolean isRepositoryVersionCorrect(RepositoryVersionEntity repositoryVersion)
+       throws AmbariException {
+     HostEntity hostEntity = getHostEntity();
+     Collection<HostComponentStateEntity> hostComponentStates = hostEntity.getHostComponentStateEntities();
+ 
+     // for every host component, if it matches the desired repo and has reported
+     // the correct version then we're good
+     for (HostComponentStateEntity hostComponentState : hostComponentStates) {
+       ServiceComponentDesiredStateEntity desiredComponentState = hostComponentState.getServiceComponentDesiredStateEntity();
+       RepositoryVersionEntity desiredRepositoryVersion = desiredComponentState.getDesiredRepositoryVersion();
+ 
++      Long clusterId = hostComponentState.getClusterId();
++      Cluster cluster = clusters.getCluster(clusterId);
++      Service s = cluster.getService(hostComponentState.getServiceId());
+       ComponentInfo componentInfo = ambariMetaInfo.getComponent(
+           desiredRepositoryVersion.getStackName(), desiredRepositoryVersion.getStackVersion(),
 -          hostComponentState.getServiceName(), hostComponentState.getComponentName());
++          s.getName(), hostComponentState.getComponentName());
+ 
+       // skip components which don't advertise a version
+       if (!componentInfo.isVersionAdvertised()) {
+         continue;
+       }
+ 
+       // we only care about checking the specified repo version for this host
+       if (!repositoryVersion.equals(desiredRepositoryVersion)) {
+         continue;
+       }
+ 
+       String versionAdvertised = hostComponentState.getVersion();
+       if (hostComponentState.getUpgradeState() == UpgradeState.IN_PROGRESS
+           || !StringUtils.equals(versionAdvertised, repositoryVersion.getVersion())) {
+         return false;
+       }
+     }
+ 
+     return true;
+   }
  }
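
A hypothetical call site for the new per-host check, assuming the matching declaration on the Host interface that the @Override above implies; both arguments are taken to come from the caller's context:

    import org.apache.ambari.server.AmbariException;
    import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
    import org.apache.ambari.server.state.Host;

    final class RepoVersionCheckSketch {
      // Returns true iff every version-advertising component on the host that
      // is pinned to targetRepo has reported exactly that version and none of
      // them is still mid-upgrade.
      static boolean hostIsOnRepo(Host host, RepositoryVersionEntity targetRepo)
          throws AmbariException {
        return host.isRepositoryVersionCorrect(targetRepo);
      }
    }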
  
  

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/common-services/ATLAS/0.7.0.3.0/service_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/common-services/RANGER_KMS/1.0.0.3.0/service_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/stacks/HDP/2.5/services/stack_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index dbe2eed,9285526..8c44632
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@@ -73,6 -71,6 +73,7 @@@ import org.apache.ambari.server.state.O
  import org.apache.ambari.server.state.PropertyDependencyInfo;
  import org.apache.ambari.server.state.PropertyInfo;
  import org.apache.ambari.server.state.RepositoryInfo;
++import org.apache.ambari.server.state.ServiceGroup;
  import org.apache.ambari.server.state.ServiceInfo;
  import org.apache.ambari.server.state.StackId;
  import org.apache.ambari.server.state.StackInfo;
@@@ -1738,8 -1716,8 +1721,8 @@@ public class AmbariMetaInfoTest 
  
      RepositoryVersionEntity repositoryVersion = ormHelper.getOrCreateRepositoryVersion(
          cluster.getCurrentStackVersion(), repoVersion);
--
--    cluster.addService("HDFS", repositoryVersion);
++    ServiceGroup sg = cluster.addServiceGroup("core");
++    cluster.addService(sg, "HDFS", "HDFS", repositoryVersion);
  
      metaInfo.reconcileAlertDefinitions(clusters);
  
@@@ -1962,7 -1947,7 +1952,8 @@@
      Properties properties = new Properties();
      properties.setProperty(Configuration.METADATA_DIR_PATH.getKey(), stackRoot.getPath());
      properties.setProperty(Configuration.SERVER_VERSION_FILE.getKey(), versionFile.getPath());
 +    properties.setProperty(Configuration.MPACKS_V2_STAGING_DIR_PATH.getKey(),"src/test/resources/mpacks-v2");
+     properties.setProperty(Configuration.RESOURCES_DIR.getKey(), resourcesRoot.getPath());
      Configuration configuration = new Configuration(properties);
  
      TestAmbariMetaInfo metaInfo = new TestAmbariMetaInfo(configuration);

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
----------------------------------------------------------------------
diff --cc ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
index f7f7ac6,a3d13b1..049fe27
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/kerberos/KerberosDescriptorUpdateHelperTest.java
@@@ -69,11 -66,10 +69,12 @@@ public class KerberosDescriptorUpdateHe
          properties.put("common.services.path", "src/main/resources/common-services");
          properties.put("server.version.file", "target/version");
          properties.put("custom.action.definitions", "/tmp/nofile");
 +        properties.put("mpacks-v2.staging.path","src/test/resources/mpacks-v2");
+         properties.put("resources.dir", "src/main/resources");
          Configuration configuration = new Configuration(properties);
  
 -        install(new FactoryModuleBuilder().build(StackManagerFactory.class));
 +        install(new FactoryModuleBuilder().implement(
 +          StackManager.class, StackManagerMock.class).build(StackManagerFactory.class));
  
          bind(Clusters.class).toInstance(createNiceMock(Clusters.class));
          bind(DBAccessor.class).toInstance(createNiceMock(DBAccessor.class));

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/app/controllers/installer.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/app/controllers/wizard/step8_controller.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/app/utils/ajax/ajax.js
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/test/controllers/installer_test.js
----------------------------------------------------------------------
diff --cc ambari-web/test/controllers/installer_test.js
index 37c2303,65a1e5c..d2d4f2c
--- a/ambari-web/test/controllers/installer_test.js
+++ b/ambari-web/test/controllers/installer_test.js
@@@ -494,27 -468,44 +494,44 @@@ describe('App.InstallerController', fun
        };
  
        beforeEach(function () {
+         sinon.spy(checker, 'loadStacks');
 -        installerController.loadMap['1'][0].callback.call(checker);
 +        installerController.loadMap['step1'][0].callback.call(checker);
        });
  
-       it('stack info is loaded', function () {
-         expect(loadStacks).to.be.true;
+       afterEach(function() {
+         checker.loadStacks.restore();
+       });
+ 
+       it('should call loadStacks, stack info not loaded', function () {
+         expect(checker.loadStacks.calledOnce).to.be.true;
        });
      });
  
-     describe ('Should load stacks async', function() {
-       var loadStacksVersions = false;
+     describe('Should load stacks async', function() {
        var checker = {
-         loadStacksVersions: function() {
-           loadStacksVersions = true;
-         }
+         loadStacksVersions: Em.K
        };
  
+       beforeEach(function () {
+         sinon.spy(checker, 'loadStacksVersions');
+       });
+ 
+       afterEach(function() {
+         checker.loadStacksVersions.restore();
+       });
+ 
        it('stack versions are loaded', function () {
 -        installerController.loadMap['1'][1].callback.call(checker, true).then(function(data){
 +        installerController.loadMap['step1'][1].callback.call(checker, true).then(function(data){
            expect(data).to.be.true;
          });
-         expect(loadStacksVersions).to.be.false;
+         expect(checker.loadStacksVersions.called).to.be.false;
+       });
+ 
+       it('should call loadStacksVersions, stack versions not loaded', function () {
+         installerController.loadMap['1'][1].callback.call(checker, false).then(function(data){
+           expect(data).to.be.true;
+         });
+         expect(checker.loadStacksVersions.calledOnce).to.be.true;
        });
      });
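
The test refactor above replaces hand-rolled boolean flags with sinon spies restored in beforeEach/afterEach. Purely for illustration, the same spy-and-restore idea in Python with unittest.mock; Checker and run_step1_callback are invented stand-ins, not the real installer controller.

    from unittest import mock

    class Checker:
        def load_stacks(self):
            pass  # stand-in for the controller's loader

    def run_step1_callback(checker, already_loaded):
        # Stand-in for loadMap['step1'][0].callback: only reload when needed.
        if not already_loaded:
            checker.load_stacks()

    checker = Checker()
    with mock.patch.object(checker, 'load_stacks',
                           wraps=checker.load_stacks) as spy:
        run_step1_callback(checker, already_loaded=False)
        spy.assert_called_once()   # the spy is restored on context exit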
  

http://git-wip-us.apache.org/repos/asf/ambari/blob/c36afcdd/ambari-web/test/mixins/common/configs/configs_saver_test.js
----------------------------------------------------------------------


[07/50] [abbrv] ambari git commit: AMBARI-22095 Make hooks stack agnostic (dsen)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
deleted file mode 100644
index 20992e2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ /dev/null
@@ -1,254 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import collections
-import re
-import os
-import ast
-
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and offers the same function set.
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.expect import expect
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from resource_management.libraries.functions.get_architecture import get_architecture
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_root = Script.get_stack_root()
-
-architecture = get_architecture()
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-jdk_location = config['hostLevelParams']['jdk_location']
-
-hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
-
-sudo = AMBARI_SUDO_BINARY
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
-version = default("/commandParams/version", None)
-# Handle upgrade and downgrade
-if (upgrade_type is not None) and version:
-  stack_version_formatted = format_stack_version(version)
-ambari_java_home = default("/commandParams/ambari_java_home", None)
-ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-secure_dn_ports_are_in_use = False
-
-def get_port(address):
-  """
-  Extracts port from the address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def is_secure_port(port):
-  """
-  Returns True if port is root-owned at *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-# upgrades would cause these directories to have a version instead of "current"
-# which would cause a lot of problems when writing out hadoop-env.sh; instead
-# force the use of "current" in the hook
-hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hadoop_home = stack_select.get_hadoop_dir("home")
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-
-hadoop_dir = "/etc/hadoop"
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
-is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
-
-mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
-if not security_enabled:
-  hadoop_secure_dn_user = '""'
-else:
-  dfs_dn_port = get_port(dfs_dn_addr)
-  dfs_dn_http_port = get_port(dfs_dn_http_addr)
-  dfs_dn_https_port = get_port(dfs_dn_https_addr)
-  # Avoid the datanode failing to start as a plain user because root-owned (privileged) ports are configured
-  if dfs_http_policy == "HTTPS_ONLY":
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
-  elif dfs_http_policy == "HTTP_AND_HTTPS":
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
-  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
-  if secure_dn_ports_are_in_use:
-    hadoop_secure_dn_user = hdfs_user
-  else:
-    hadoop_secure_dn_user = '""'
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-falcon_user = config['configurations']['falcon-env']["falcon_user"]
-ranger_user = config['configurations']['ranger-env']["ranger_user"]
-zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
-zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-
-has_namenode = not len(namenode_host) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = 'tez-site' in config['configurations']
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_falcon_server_hosts = not len(falcon_server_hosts) == 0
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-has_zeppelin_master = not len(zeppelin_master_hosts) == 0
-stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-if has_namenode or dfs_type == 'HCFS':
-    hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-    hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-ranger_group = config['configurations']['ranger-env']['ranger_group']
-dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
-
-sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = {}
-
-#Append new user-group mapping to the dict
-try:
-  user_group_map = ast.literal_eval(config['hostLevelParams']['user_groups'])
-  for key in user_group_map.iterkeys():
-    user_to_groups_dict[key] = user_group_map[key]
-except ValueError:
-  print('User Group mapping (user_group) is missing in the hostLevelParams')
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
-
-# if NameNode HA is enabled on a secure cluster, access ZooKeeper securely
-if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
-    hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client")
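
Although this params.py is deleted here (the hooks move out of the stack as part of making them stack agnostic), the get_port/is_secure_port pair above is worth a quick, runnable restatement; the sample addresses below are invented.

    import re

    def get_port(address):                     # as in the deleted params.py
        if address is None:
            return None
        m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
        return int(m.group(2)) if m else None

    def is_secure_port(port):                  # root-owned (< 1024) on *nix
        return port is not None and port < 1024

    assert get_port("0.0.0.0:1019") == 1019
    assert get_port("https://nn.example.com:50470/") == 50470
    assert get_port(None) is None
    assert is_secure_port(get_port("0.0.0.0:1019"))       # privileged port
    assert not is_secure_port(get_port("0.0.0.0:50010"))  # plain DN port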

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
deleted file mode 100644
index 27679e0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ /dev/null
@@ -1,273 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import getpass
-import tempfile
-from copy import copy
-from resource_management.libraries.functions.version import compare_versions
-from resource_management import *
-from resource_management.core import shell
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  should_create_users_and_groups = False
-  if params.host_sys_prepped:
-    should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
-  else:
-    should_create_users_and_groups = not params.ignore_groupsusers_create
-
-  if should_create_users_and_groups:
-    for group in params.group_list:
-      Group(group,
-      )
-
-    for user in params.user_list:
-      User(user,
-           uid = get_uid(user) if params.override_uid == "true" else None,
-           gid = params.user_to_gid_dict[user],
-           groups = params.user_to_groups_dict[user],
-           fetch_nonlocal_groups = params.fetch_nonlocal_groups,
-           )
-
-    if params.override_uid == "true":
-      set_uid(params.smoke_user, params.smoke_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for smoke user as host is sys prepped')
-  else:
-    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
-    pass
-
-
-  if params.has_hbase_masters:
-    Directory (params.hbase_tmp_dir,
-               owner = params.hbase_user,
-               mode=0775,
-               create_parents = True,
-               cd_access="a",
-    )
-
-    if params.override_uid == "true":
-      set_uid(params.hbase_user, params.hbase_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for hbase user as host is sys prepped')
-
-  if should_create_users_and_groups:
-    if params.has_namenode:
-      create_dfs_cluster_admins()
-    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
-      create_tez_am_view_acls()
-  else:
-    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
-
-def create_dfs_cluster_admins():
-  """
-  dfs.cluster.administrators support format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
-
-  User(params.hdfs_user,
-    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-    fetch_nonlocal_groups = params.fetch_nonlocal_groups
-  )
-
-def create_tez_am_view_acls():
-
-  """
-  tez.am.view-acls support format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  if not params.tez_am_view_acls.startswith("*"):
-    create_users_and_groups(params.tez_am_view_acls)
-
-def create_users_and_groups(user_and_groups):
-
-  import params
-
-  parts = re.split('\s+', user_and_groups)
-  if len(parts) == 1:
-    parts.append("")
-
-  users_list = parts[0].strip(",").split(",") if parts[0] else []
-  groups_list = parts[1].strip(",").split(",") if parts[1] else []
-
-  # skip creating groups and users if * is provided as value.
-  users_list = filter(lambda x: x != '*' , users_list)
-  groups_list = filter(lambda x: x != '*' , groups_list)
-
-  if users_list:
-    User(users_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-    )
-
-  if groups_list:
-    Group(copy(groups_list),
-    )
-  return groups_list
-
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  uid = get_uid(user, return_existing=True)
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs} {new_uid}", new_uid=0 if uid is None else uid),
-          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-
-def get_uid(user, return_existing=False):
-  """
-  Resolves the UID for a username. It first looks for the UID in custom *-env configuration properties and,
-  if *return_existing=True*, falls back to the UID of the existing *user*.
-
-  :param user: username to get UID for
-  :param return_existing: return UID for existing user
-  :return:
-  """
-  import params
-  user_str = str(user) + "_uid"
-  service_env = [ serviceEnv for serviceEnv in params.config['configurations'] if user_str in params.config['configurations'][serviceEnv]]
-
-  if service_env and params.config['configurations'][service_env[0]][user_str]:
-    service_env_str = str(service_env[0])
-    uid = params.config['configurations'][service_env_str][user_str]
-    if len(service_env) > 1:
-      Logger.warning("Multiple values found for %s, using %s"  % (user_str, uid))
-    return uid
-  else:
-    if return_existing:
-      # pick up existing UID or try to find available UID in /etc/passwd, see changeToSecureUid.sh for more info
-      if user == params.smoke_user:
-        return None
-      File(format("{tmp_dir}/changeUid.sh"),
-           content=StaticFile("changeToSecureUid.sh"),
-           mode=0555)
-      code, newUid = shell.call(format("{tmp_dir}/changeUid.sh {user}"))
-      return int(newUid)
-    else:
-      # do not return UID for existing user, used in User resource call to let OS to choose UID for us
-      return None
-
-def setup_hadoop_env():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    # create /etc/hadoop
-    Directory(params.hadoop_dir, mode=0755)
-
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
-        group=params.user_group,
-        content=InlineTemplate(params.hadoop_env_sh_template))
-
-    # Create tmp dir for java.io.tmpdir
-    # Handle a situation when /tmp is set to noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=01777
-    )
-
-def setup_java():
-  """
-  Install jdk using specific params.
-  Install ambari jdk as well if the stack and ambari jdk are different.
-  """
-  import params
-  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
-  if params.ambari_java_home and params.ambari_java_home != params.java_home:
-    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
-
-def __setup_java(custom_java_home, custom_jdk_name):
-  """
-  Installs jdk using specific params, that comes from ambari-server
-  """
-  import params
-  java_exec = format("{custom_java_home}/bin/java")
-
-  if not os.path.isfile(java_exec):
-    if not params.jdk_name: # no Ambari-managed JDK to download; a custom JDK was configured
-      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
-
-    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
-    java_dir = os.path.dirname(params.java_home)
-
-    Directory(params.artifact_dir,
-              create_parents = True,
-              )
-
-    File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
-         not_if = format("test -f {jdk_curl_target}")
-         )
-
-    File(jdk_curl_target,
-         mode = 0755,
-         )
-
-    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
-
-    try:
-      if params.jdk_name.endswith(".bin"):
-        chmod_cmd = ("chmod", "+x", jdk_curl_target)
-        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-      elif params.jdk_name.endswith(".gz"):
-        chmod_cmd = ("chmod","a+x", java_dir)
-        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-
-      Directory(java_dir
-                )
-
-      Execute(chmod_cmd,
-              sudo = True,
-              )
-
-      Execute(install_cmd,
-              )
-
-    finally:
-      Directory(tmp_java_dir, action="delete")
-
-    File(format("{custom_java_home}/bin/java"),
-         mode=0755,
-         cd_access="a",
-         )
-    Execute(('chmod', '-R', '755', params.java_home),
-            sudo = True,
-            )
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100644
index ce17776..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
-
-class BeforeInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-    
-    install_repos()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
deleted file mode 100644
index 50c5a40..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management.core.system import System
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import default, format
-from resource_management.libraries.functions.expect import expect
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-
-# repo templates
-repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
-has_hs = not len(hs_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-has_falcon_server = not len(falcon_host) == 0
-has_tez = 'tez-site' in config['configurations']
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-repo_file = default("/repositoryFile", None)
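
Nearly every assignment in this file leans on the resource_management default() helper, which resolves a "/"-separated path against the command JSON. A rough sketch of those semantics follows; note the real helper reads Script.get_config() itself rather than taking the dict as an argument, so this signature is an assumption for illustration.

    def default(config, path, default_value):
        # Walk "/a/b/c" through nested dicts; any missing key falls back.
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return default_value
            node = node[key]
        return node

    cfg = {"clusterHostInfo": {"rm_host": ["rm1.example.com"]}}
    assert default(cfg, "/clusterHostInfo/rm_host", []) == ["rm1.example.com"]
    assert default(cfg, "/clusterHostInfo/oozie_server", []) == []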

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py
deleted file mode 100644
index 9f2b344..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py
+++ /dev/null
@@ -1,75 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.resources.repository import Repository
-from resource_management.libraries.functions.repository_util import create_repo_files, CommandRepository, UBUNTU_REPO_COMPONENTS_POSTFIX
-from resource_management.core.logger import Logger
-import ambari_simplejson as json
-
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  if 0 == len(repo_dicts):
-    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
-  else:
-    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
-
-  for repo in repo_dicts:
-    if not 'baseUrl' in repo:
-      repo['baseUrl'] = None
-    if not 'mirrorsList' in repo:
-      repo['mirrorsList'] = None
-
-    ubuntu_components = [ repo['distribution'] if 'distribution' in repo and repo['distribution'] else repo['repoName'] ] \
-                        + [repo['components'].replace(",", " ") if 'components' in repo and repo['components'] else UBUNTU_REPO_COMPONENTS_POSTFIX]
-
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components) # ubuntu specific
-
-
-def install_repos():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
-
-  # use this newer way of specifying repositories, if available
-  if params.repo_file is not None:
-    create_repo_files(template, CommandRepository(params.repo_file))
-    return
-
-  _alter_repo("create", params.repo_info, template)
-
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 1609050..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import stack_tools
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.core.resources.packaging import Package
-
-def install_packages():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  packages = ['unzip', 'curl']
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
-    packages.append(stack_selector_package)
-  Package(packages,
-          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-          retry_count=params.agent_stack_retry_count)
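
install_packages above gates on two things: sys-prepped hosts get nothing, and the stack selector tool is only added for stacks >= 2.2. The decision restated runnably; 'hdp-select' stands in for whatever stack_tools would actually resolve.

    def packages_to_install(stack_version, host_sys_prepped):
        if host_sys_prepped:
            return []                      # pre-provisioned images skip this
        packages = ['unzip', 'curl']
        def as_tuple(v):
            return tuple(int(x) for x in v.split('.'))
        if stack_version and as_tuple(stack_version) >= (2, 2):
            packages.append('hdp-select')  # assumed selector package name
        return packages

    assert packages_to_install("2.6", False) == ['unzip', 'curl', 'hdp-select']
    assert packages_to_install("2.0", False) == ['unzip', 'curl']
    assert packages_to_install("2.6", True) == []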

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-RESTART/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-RESTART/scripts/hook.py
deleted file mode 100644
index 14b9d99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-RESTART/scripts/hook.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index 68aa96d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
-  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
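
The shell guard above refuses to format when any NameNode directory already has content. Its essence in a few lines of Python, illustrative only; the real script also manages the marker file and runs "hdfs namenode -format" via sudo.

    import os

    def dirs_blocking_format(name_dirs):
        # Any NameNode dir with existing entries must block formatting.
        return [d for d in name_dirs.split(',')
                if os.path.isdir(d) and os.listdir(d)]

    # An empty result means formatting may proceed; a non-empty list should
    # abort with an error naming the offending directories.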

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar
deleted file mode 100644
index 6c993bf..0000000
Binary files a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index 7e12962..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
- 
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/topology_script.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/topology_script.py
deleted file mode 100644
index 0f7a55c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/topology_script.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys, os
-from string import join
-import ConfigParser
-
-
-DEFAULT_RACK = "/default-rack"
-DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
-SECTION_NAME = "network_topology"
-
-class TopologyScript():
-
-  def load_rack_map(self):
-    try:
-      #RACK_MAP contains both host name vs rack and ip vs rack mappings
-      mappings = ConfigParser.ConfigParser()
-      mappings.read(DATA_FILE_NAME)
-      return dict(mappings.items(SECTION_NAME))
-    except ConfigParser.NoSectionError:
-      return {}
-
-  def get_racks(self, rack_map, args):
-    if len(args) == 1:
-      return DEFAULT_RACK
-    else:
-      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
-
-  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
-    #try looking up by hostname
-    rack = rack_map.get(hostname_or_ip)
-    if rack is not None:
-      return rack
-    #try looking up by ip
-    rack = rack_map.get(self.extract_ip(hostname_or_ip))
-    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
-    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
-
-  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
-  def extract_ip(self, container_string):
-    return container_string.split("/")[0].split(":")[0]
-
-  def execute(self, args):
-    rack_map = self.load_rack_map()
-    rack = self.get_racks(rack_map, args)
-    print rack
-
-if __name__ == "__main__":
-  TopologyScript().execute(sys.argv)
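A quick sketch of how Hadoop drives the script above (host names and rack values hypothetical): Hadoop passes one or more host names or IPs as arguments, and the script prints the resolved racks, space-separated.

$ python topology_script.py c6401.ambari.apache.org unknown-host
/rack-1 /default-rack

Hosts found in topology_mappings.data resolve to their mapped rack; anything unresolved falls back to /default-rack (via the localhost.localdomain default).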

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/custom_extensions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/custom_extensions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/custom_extensions.py
deleted file mode 100644
index 04299ba..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/custom_extensions.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.core.resources import Directory
-from resource_management.core.resources import Execute
-from resource_management.libraries.functions import default
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import format
-
-
-DEFAULT_HADOOP_HDFS_EXTENSION_DIR = "/hdp/ext/{0}/hadoop"
-DEFAULT_HADOOP_HIVE_EXTENSION_DIR = "/hdp/ext/{0}/hive"
-DEFAULT_HADOOP_HBASE_EXTENSION_DIR = "/hdp/ext/{0}/hbase"
-
-def setup_extensions():
-  """
-  The goal of this method is to distribute extensions (for example, jar files) from
-  HDFS (/hdp/ext/{major_stack_version}/{service_name}) to all nodes that host the related
-  components of a service (YARN, HIVE or HBASE). Extensions must be added to HDFS
-  manually by the user.
-  """
-
-  import params
-
-  # Hadoop Custom extensions
-  hadoop_custom_extensions_enabled = default("/configurations/core-site/hadoop.custom-extensions.enabled", False)
-  hadoop_custom_extensions_services = default("/configurations/core-site/hadoop.custom-extensions.services", "")
-  hadoop_custom_extensions_owner = default("/configurations/core-site/hadoop.custom-extensions.owner", params.hdfs_user)
-  hadoop_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/core-site/hadoop.custom-extensions.root",
-                                                 DEFAULT_HADOOP_HDFS_EXTENSION_DIR.format(params.major_stack_version)))
-  hadoop_custom_extensions_services = [ service.strip().upper() for service in hadoop_custom_extensions_services.split(",") ]
-  hadoop_custom_extensions_services.append("YARN")
-
-  hadoop_custom_extensions_local_dir = "{0}/current/ext/hadoop".format(Script.get_stack_root())
-
-  if params.current_service in hadoop_custom_extensions_services:
-    clean_extensions(hadoop_custom_extensions_local_dir)
-    if hadoop_custom_extensions_enabled:
-      download_extensions(hadoop_custom_extensions_owner, params.user_group,
-                          hadoop_custom_extensions_hdfs_dir,
-                          hadoop_custom_extensions_local_dir)
-
-  setup_extensions_hive()
-
-  hbase_custom_extensions_services = []
-  hbase_custom_extensions_services.append("HBASE")
-  if params.current_service in hbase_custom_extensions_services:
-    setup_hbase_extensions()
-
-
-def setup_hbase_extensions():
-  import params
-
-  # HBase Custom extensions
-  hbase_custom_extensions_enabled = default("/configurations/hbase-site/hbase.custom-extensions.enabled", False)
-  hbase_custom_extensions_owner = default("/configurations/hbase-site/hbase.custom-extensions.owner", params.hdfs_user)
-  hbase_custom_extensions_hdfs_dir = get_config_formatted_value(default("/configurations/hbase-site/hbase.custom-extensions.root",
-                                                DEFAULT_HADOOP_HBASE_EXTENSION_DIR.format(params.major_stack_version)))
-  hbase_custom_extensions_local_dir = "{0}/current/ext/hbase".format(Script.get_stack_root())
-
-  impacted_components = ['HBASE_MASTER', 'HBASE_REGIONSERVER', 'PHOENIX_QUERY_SERVER'];
-  role = params.config.get('role','')
-
-  if role in impacted_components:
-    clean_extensions(hbase_custom_extensions_local_dir)
-    if hbase_custom_extensions_enabled:
-      download_extensions(hbase_custom_extensions_owner, params.user_group,
-                          hbase_custom_extensions_hdfs_dir,
-                          hbase_custom_extensions_local_dir)
-
-
-def setup_extensions_hive():
-  import params
-
-  hive_custom_extensions_enabled = default("/configurations/hive-site/hive.custom-extensions.enabled", False)
-  hive_custom_extensions_owner = default("/configurations/hive-site/hive.custom-extensions.owner", params.hdfs_user)
-  hive_custom_extensions_hdfs_dir = DEFAULT_HADOOP_HIVE_EXTENSION_DIR.format(params.major_stack_version)
-
-  hive_custom_extensions_local_dir = "{0}/current/ext/hive".format(Script.get_stack_root())
-
-  impacted_components = ['HIVE_SERVER', 'HIVE_CLIENT'];
-  role = params.config.get('role','')
-
-  # Run copying for HIVE_SERVER and HIVE_CLIENT
-  if params.current_service == 'HIVE' and role in impacted_components:
-    clean_extensions(hive_custom_extensions_local_dir)
-    if hive_custom_extensions_enabled:
-      download_extensions(hive_custom_extensions_owner, params.user_group,
-                          hive_custom_extensions_hdfs_dir,
-                          hive_custom_extensions_local_dir)
-
-def download_extensions(owner_user, owner_group, hdfs_source_dir, local_target_dir):
-  """
-  :param owner_user: user owner of the HDFS directory
-  :param owner_group: group owner of the HDFS directory
-  :param hdfs_source_dir: the HDFS directory the files are pulled from
-  :param local_target_dir: the local directory the files are downloaded to
-  :return: True if successful, otherwise False.
-  """
-  import params
-
-  if not os.path.isdir(local_target_dir):
-    extensions_tmp_dir=format("{tmp_dir}/custom_extensions")
-    Directory(local_target_dir,
-              owner="root",
-              mode=0755,
-              group="root",
-              create_parents=True)
-
-    params.HdfsResource(hdfs_source_dir,
-                        type="directory",
-                        action="create_on_execute",
-                        owner=owner_user,
-                        group=owner_group,
-                        mode=0755)
-
-    Directory(extensions_tmp_dir,
-              owner=params.hdfs_user,
-              mode=0755,
-              create_parents=True)
-
-    # copy from hdfs to /tmp
-    params.HdfsResource(extensions_tmp_dir,
-                        type="directory",
-                        action="download_on_execute",
-                        source=hdfs_source_dir,
-                        user=params.hdfs_user,
-                        mode=0644,
-                        replace_existing_files=True)
-
-    # The Execute resource does not quote the command correctly, so it is issued as a single shell string.
-    cmd = format("{sudo} mv {extensions_tmp_dir}/* {local_target_dir}")
-    only_if_cmd = "ls -d {extensions_tmp_dir}/*".format(extensions_tmp_dir=extensions_tmp_dir)
-    Execute(cmd, only_if=only_if_cmd)
-
-    only_if_local = 'ls -d "{local_target_dir}"'.format(local_target_dir=local_target_dir)
-    Execute(("chown", "-R", "root:root", local_target_dir),
-            sudo=True,
-            only_if=only_if_local)
-
-    params.HdfsResource(None,action="execute")
-  return True
-
-def clean_extensions(local_dir):
-  """
-  :param local_dir: The local directory where the extensions are stored.
-  :return: True if successful, otherwise False.
-  """
-  if os.path.isdir(local_dir):
-    Directory(local_dir,
-              action="delete")
-  return True
-
-def get_config_formatted_value(property_value):
-  return format(property_value.replace("{{", "{").replace("}}", "}"))
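The helper above turns the double-braced config placeholders into single braces so resource_management's format() can substitute them from the caller's scope; a pure-Python approximation (property value hypothetical, str.format standing in for the real format()):

major_stack_version = "2.6"   # hypothetical
prop = "/hdp/ext/{{major_stack_version}}/hadoop"
unescaped = prop.replace("{{", "{").replace("}}", "}")
print(unescaped.format(major_stack_version=major_stack_version))   # /hdp/ext/2.6/hadoop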

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py
deleted file mode 100644
index 4cb276a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink, setup_unlimited_key_jce_policy
-from custom_extensions import setup_extensions
-
-class BeforeStartHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-    create_topology_script_and_mapping()
-    setup_unlimited_key_jce_policy()
-    if params.stack_supports_hadoop_custom_extensions:
-      setup_extensions()
-
-if __name__ == "__main__":
-  BeforeStartHook().execute()
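In outline, the hook chains the before-ANY hook first, runs each setup step in order, and gates the optional extension step on a stack feature flag; the same control flow in miniature (pure Python, stub steps, flag value hypothetical):

def run_custom_hook(name): print("hook: " + name)
def setup_hadoop(): print("setup_hadoop")
def setup_extensions(): print("setup_extensions")

def before_start(stack_supports_hadoop_custom_extensions):
    run_custom_hook('before-ANY')   # parent hook runs first
    setup_hadoop()
    if stack_supports_hadoop_custom_extensions:
        setup_extensions()          # only on stacks with the feature

before_start(True)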

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
deleted file mode 100644
index 6c26e01..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,380 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version, compare_versions, get_major_version
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from resource_management.libraries.functions import StackFeature
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-artifact_dir = tmp_dir + "/AMBARI-artifacts"
-
-version_for_stack_feature_checks = get_stack_feature_version(config)
-stack_supports_hadoop_custom_extensions = check_stack_feature(StackFeature.HADOOP_CUSTOM_EXTENSIONS, version_for_stack_feature_checks)
-
-sudo = AMBARI_SUDO_BINARY
-
-# Global flag enabling or disabling the sysprep feature
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/
-# This is required if tarballs are going to be copied to HDFS, so set to False
-sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
-
-# Whether to skip setting up the unlimited key JCE policy
-sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-major_stack_version = get_major_version(stack_version_formatted)
-
-dfs_type = default("/commandParams/dfs_type", "")
-hadoop_conf_dir = "/etc/hadoop/conf"
-component_list = default("/localComponents", [])
-
-hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
-
-hadoop_metrics2_properties_content = None
-if 'hadoop-metrics2.properties' in config['configurations']:
-  hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
-
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-
-mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-hadoop_home = stack_select.get_hadoop_dir("home")
-create_lib_snappy_symlinks = False
-  
-current_service = config['serviceName']
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-ambari_server_resources_url = default("/hostLevelParams/jdk_location", None)
-if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
-  ambari_server_resources_url = ambari_server_resources_url[:-1]
-
-# Unlimited key JCE policy params
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-unlimited_key_jce_required = default("/hostLevelParams/unlimited_key_jce_required", False)
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = default("/hostLevelParams/java_home", None)
-java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java"
-
-#users and groups
-has_hadoop_env = 'hadoop-env' in config['configurations']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-cluster_name = config["clusterName"]
-set_instanceId = "false"
-if 'cluster-env' in config['configurations'] and \
-    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
-  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
-  set_instanceId = "true"
-else:
-  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
-
-has_namenode = not len(namenode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_hcat_server_host = not len(hcat_server_hosts) == 0
-has_hive_server_host = not len(hive_server_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_metric_collector = not len(ams_collector_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-metric_collector_port = None
-if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_external_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
-    if metric_collector_web_address.find(':') != -1:
-      metric_collector_port = metric_collector_web_address.split(':')[1]
-    else:
-      metric_collector_port = '6188'
-  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-    metric_collector_protocol = 'https'
-  else:
-    metric_collector_protocol = 'http'
-  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-
-  pass
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
-
-host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
-host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
-
-# Cluster Zookeeper quorum
-zookeeper_quorum = None
-if has_zk_host:
-  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
-    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
-  else:
-    zookeeper_clientPort = '2181'
-  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
-  # last port config
-  zookeeper_quorum += ':' + zookeeper_clientPort
-
-#hadoop params
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hbase_tmp_dir = "/tmp/hbase-hbase"
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar")
-mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar")
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
-  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
-else:
-  rca_enabled = False
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-# Hdfs log4j settings
-hadoop_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_log_max_backup_size', 256)
-hadoop_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_log_number_of_backup_files', 10)
-hadoop_security_log_max_backup_size = default('configurations/hdfs-log4j/hadoop_security_log_max_backup_size', 256)
-hadoop_security_log_number_of_backup_files = default('configurations/hdfs-log4j/hadoop_security_log_number_of_backup_files', 20)
-
-# Yarn log4j settings
-yarn_rm_summary_log_max_backup_size = default('configurations/yarn-log4j/yarn_rm_summary_log_max_backup_size', 256)
-yarn_rm_summary_log_number_of_backup_files = default('configurations/yarn-log4j/yarn_rm_summary_log_number_of_backup_files', 20)
-
-#log4j.properties
-if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
-  log4j_props = config['configurations']['hdfs-log4j']['content']
-  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
-    log4j_props += config['configurations']['yarn-log4j']['content']
-else:
-  log4j_props = None
-
-refresh_topology = False
-command_params = config["commandParams"] if "commandParams" in config else None
-if command_params is not None:
-  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
-
-ambari_java_home = default("/commandParams/ambari_java_home", None)
-ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
-ambari_jce_name = default("/commandParams/ambari_jce_name", None)
-  
-ambari_libs_dir = "/var/lib/ambari-agent/lib"
-is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-#host info
-all_hosts = default("/clusterHostInfo/all_hosts", [])
-all_racks = default("/clusterHostInfo/all_racks", [])
-all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-
-#topology files
-net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
-net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
-net_topology_mapping_data_file_name = 'topology_mappings.data'
-net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
-
-#Added logic to create /tmp and /user directories for the HCFS stack.
-has_core_site = 'core-site' in config['configurations']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-kinit_path_local = get_kinit_path()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-hdfs_site = config['configurations']['hdfs-site']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-
-##### Namenode RPC ports - metrics config section start #####
-
-# Figure out the rpc ports for current namenode
-nn_rpc_client_port = None
-nn_rpc_dn_port = None
-nn_rpc_healthcheck_port = None
-
-namenode_id = None
-namenode_rpc = None
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-
-dfs_ha_namemodes_ids_list = []
-other_namenode_id = None
-
-if dfs_ha_namenode_ids:
- dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
- dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
- if dfs_ha_namenode_ids_array_len > 1:
-   dfs_ha_enabled = True
-
-if dfs_ha_enabled:
- for nn_id in dfs_ha_namemodes_ids_list:
-   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-   if hostname.lower() in nn_host.lower():
-     namenode_id = nn_id
-     namenode_rpc = nn_host
-   pass
- pass
-else:
-  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', default_fs)
-
-# if HDFS is not installed in the cluster, then don't try to access namenode_rpc
-if "core-site" in config['configurations'] and namenode_rpc:
-  port_str = namenode_rpc.split(':')[-1].strip()
-  try:
-    nn_rpc_client_port = int(port_str)
-  except ValueError:
-    nn_rpc_client_port = None
-
-if dfs_ha_enabled:
- dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
-else:
- dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
-
-if dfs_service_rpc_address:
- nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
-
-if dfs_lifeline_rpc_address:
- nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
-
-is_nn_client_port_configured = False if nn_rpc_client_port is None else True
-is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True
-is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True
-
-##### end #####
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
-)
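The functools.partial at the end pins the cluster-wide keyword arguments once, so each call site only supplies the per-resource ones; the same pattern in miniature (stand-in function and values hypothetical):

import functools

def hdfs_resource(path, user=None, action=None, type=None, hadoop_conf_dir=None):
    # stand-in for the real HdfsResource; just echoes its arguments
    print("%s %s as %s (conf: %s)" % (action, path, user, hadoop_conf_dir))

HdfsResource = functools.partial(hdfs_resource,
                                 user="hdfs",
                                 hadoop_conf_dir="/etc/hadoop/conf")
HdfsResource("/user/ambari-qa", type="directory", action="create_on_execute")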

http://git-wip-us.apache.org/repos/asf/ambari/blob/5b36cdfd/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py
deleted file mode 100644
index 48158bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management.core.resources import File
-from resource_management.core.source import StaticFile, Template
-from resource_management.libraries.functions import format
-
-
-def create_topology_mapping():
-  import params
-
-  File(params.net_topology_mapping_data_file_path,
-       content=Template("topology_mappings.data.j2"),
-       owner=params.hdfs_user,
-       group=params.user_group,
-       mode=0644,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script():
-  import params
-
-  File(params.net_topology_script_file_path,
-       content=StaticFile('topology_script.py'),
-       mode=0755,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script_and_mapping():
-  import params
-  if params.has_hadoop_env:
-    create_topology_mapping()
-    create_topology_script()
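Both File resources above are guarded with only_if, so nothing is written on hosts without a Hadoop conf directory. A hypothetical rendered topology_mappings.data, in the INI shape topology_script.py expects (host name, IP and rack illustrative):

[network_topology]
c6401.ambari.apache.org=/rack-1
192.168.64.101=/rack-1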


[40/50] [abbrv] ambari git commit: Revert "AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)"

Posted by jl...@apache.org.
Revert "AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)"

This reverts commit 7172655ff269fbb7e0d29ba93197aa5a804749c0.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b358ee20
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b358ee20
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b358ee20

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b358ee20e857d31017e54c1bde6f3c1aac084030
Parents: fab2aa3
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 16:44:41 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 16:44:59 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     |  8 --
 .../AmbariManagementControllerImpl.java         | 22 +++--
 .../internal/ServiceResourceProvider.java       |  8 ++
 .../internal/StackArtifactResourceProvider.java | 18 +++-
 .../server/orm/entities/WidgetLayoutEntity.java |  6 +-
 .../ambari/server/stack/StackDirectory.java     | 18 ++++
 .../apache/ambari/server/stack/StackModule.java |  5 ++
 .../apache/ambari/server/state/StackInfo.java   |  8 ++
 .../resources/stacks/HDP/2.0.6/widgets.json     | 95 ++++++++++++++++++++
 .../main/resources/stacks/HDP/3.0/widgets.json  | 95 ++++++++++++++++++++
 .../server/api/services/AmbariMetaInfoTest.java |  8 --
 .../AmbariManagementControllerImplTest.java     |  6 +-
 .../AmbariManagementControllerTest.java         |  6 --
 .../resources/stacks/OTHER/1.0/widgets.json     | 95 ++++++++++++++++++++
 14 files changed, 364 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 425d247..de84965 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,7 +21,6 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
-import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -126,7 +125,6 @@ public class AmbariMetaInfo {
   private File commonServicesRoot;
   private File extensionsRoot;
   private File serverVersionFile;
-  private File commonWidgetsDescriptorFile;
   private File customActionRoot;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
@@ -216,8 +214,6 @@ public class AmbariMetaInfo {
     serverVersionFile = new File(serverVersionFilePath);
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
-
-    commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
   /**
@@ -1439,8 +1435,4 @@ public class AmbariMetaInfo {
 
     return null;
   }
-
-  public File getCommonWidgetsDescriptorFile() {
-    return commonWidgetsDescriptorFile;
-  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 5642575..b2993e3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -5184,12 +5184,22 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         widgetDescriptorFiles.add(widgetDescriptorFile);
       }
     } else {
-      // common cluster level widgets
-      File commonWidgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
-      if (commonWidgetsFile != null && commonWidgetsFile.exists()) {
-        widgetDescriptorFiles.add(commonWidgetsFile);
-      } else {
-        LOG.warn("Common widgets file with path {%s} doesn't exist. No cluster widgets will be created.", commonWidgetsFile);
+      Set<StackId> stackIds = new HashSet<>();
+
+      for (Service svc : cluster.getServices().values()) {
+        stackIds.add(svc.getDesiredStackId());
+      }
+
+      for (StackId stackId : stackIds) {
+        StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
+
+        String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
+        if (widgetDescriptorFileLocation != null) {
+          File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
+          if (widgetDescriptorFile.exists()) {
+            widgetDescriptorFiles.add(widgetDescriptorFile);
+          }
+        }
       }
     }
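After the revert, cluster-level widget descriptors are again collected per distinct desired stack across the cluster's services rather than from one common file; the dedup-then-check flow, sketched in Python (services map and paths hypothetical):

import os

services = {"HDFS": "HDP-2.0.6", "YARN": "HDP-2.0.6"}   # service -> desired stack id
widgets_by_stack = {"HDP-2.0.6": "/var/lib/ambari-server/resources/stacks/HDP/2.0.6/widgets.json"}

descriptor_files = []
for stack_id in set(services.values()):        # each stack contributes at most once
    path = widgets_by_stack.get(stack_id)
    if path and os.path.exists(path):          # skip stacks without a widgets.json
        descriptor_files.append(path)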
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index e65693b..76a4547 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -423,6 +423,8 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     // do all validation checks
     validateCreateRequests(requests, clusters);
 
+    Set<Cluster> clustersSetFromRequests = new HashSet<>();
+
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
@@ -478,6 +480,12 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
       // Initialize service widgets
       getManagementController().initializeWidgetsAndLayouts(cluster, s);
+      clustersSetFromRequests.add(cluster);
+    }
+
+    // Create cluster widgets and layouts
+    for (Cluster cluster : clustersSetFromRequests) {
+      getManagementController().initializeWidgetsAndLayouts(cluster, null);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index a7f7710..2e8a32a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -426,7 +426,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     }
 
     if (StringUtils.isEmpty(serviceName)) {
-      return null;
+      return getWidgetsDescriptorForCluster(stackInfo);
     } else {
       return getWidgetsDescriptorForService(stackInfo, serviceName);
     }
@@ -450,6 +450,22 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     return widgetDescriptor;
   }
 
+  public Map<String, Object> getWidgetsDescriptorForCluster(StackInfo stackInfo)
+      throws NoSuchParentResourceException, IOException {
+
+    Map<String, Object> widgetDescriptor = null;
+
+    String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
+    if (widgetDescriptorFileLocation != null) {
+      File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
+      if (widgetDescriptorFile.exists()) {
+        widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
+      }
+    }
+
+    return widgetDescriptor;
+  }
+
   /**
    * Get a kerberos descriptor.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
index 1fa45e9..90d98fc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
@@ -33,7 +33,6 @@ import javax.persistence.OneToMany;
 import javax.persistence.OrderBy;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
-import javax.persistence.UniqueConstraint;
 
 @Entity
 @Table(name = "widget_layout")
@@ -42,8 +41,7 @@ import javax.persistence.UniqueConstraint;
         pkColumnName = "sequence_name",
         valueColumnName = "sequence_value",
         pkColumnValue = "widget_layout_id_seq",
-        initialValue = 0,
-        uniqueConstraints=@UniqueConstraint(columnNames={"layout_name", "cluster_id"})
+        initialValue = 0
 )
 @NamedQueries({
     @NamedQuery(name = "WidgetLayoutEntity.findAll", query = "SELECT widgetLayout FROM WidgetLayoutEntity widgetLayout"),
@@ -58,7 +56,7 @@ public class WidgetLayoutEntity {
   @Column(name = "id", nullable = false, updatable = false)
   private Long id;
 
-  @Column(name = "layout_name", nullable = false, length = 255)
+  @Column(name = "layout_name", nullable = false, unique = true, length = 255)
   private String layoutName;
 
   @Column(name = "section_name", nullable = false, length = 255)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index e3c586b..9259466 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -94,6 +94,11 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String kerberosDescriptorPreconfigureFilePath;
 
   /**
+   * widgets descriptor file path
+   */
+  private String widgetsDescriptorFilePath;
+
+  /**
    * repository file
    */
   private RepositoryXml repoFile;
@@ -228,6 +233,15 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
+   * Obtain the path to the (stack-level) widgets descriptor file
+   *
+   * @return the path to the (stack-level) widgets descriptor file
+   */
+  public String getWidgetsDescriptorFilePath() {
+    return widgetsDescriptorFilePath;
+  }
+
+  /**
    * Obtain the repository directory path.
    *
    * @return repository directory path
@@ -310,6 +324,10 @@ public class StackDirectory extends StackDefinitionDirectory {
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;
     }
 
+    if (subDirs.contains(WIDGETS_DESCRIPTOR_FILE_NAME)) {
+      widgetsDescriptorFilePath = getAbsolutePath() + File.separator + WIDGETS_DESCRIPTOR_FILE_NAME;
+    }
+
     parseUpgradePacks(subDirs);
     parseServiceDirectories(subDirs);
     parseRepoFile(subDirs);

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 71235f3..742706d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -294,6 +294,10 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
     }
 
+    if (stackInfo.getWidgetsDescriptorFileLocation() == null) {
+      stackInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
+    }
+
     mergeServicesWithParent(parentStack, allStacks, commonServices, extensions);
   }
 
@@ -569,6 +573,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
+      stackInfo.setWidgetsDescriptorFileLocation(stackDirectory.getWidgetsDescriptorFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());
       stackInfo.setConfigUpgradePack(stackDirectory.getConfigUpgradePack());

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index 3efc997..dcf850f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -429,6 +429,14 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.kerberosDescriptorPreConfigurationFileLocation = kerberosDescriptorPreConfigurationFileLocation;
   }
 
+  public String getWidgetsDescriptorFileLocation() {
+    return widgetsDescriptorFileLocation;
+  }
+
+  public void setWidgetsDescriptorFileLocation(String widgetsDescriptorFileLocation) {
+    this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
+  }
+
   /**
    * Set the path of the stack upgrade directory.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host CPU Wait IO %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
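Widget values are arithmetic expressions over the metrics declared alongside them; evaluating the disk heatmap expression by hand (sample numbers hypothetical):

disk_total = 500.0   # hypothetical totals reported by a host
disk_free = 125.0
print(((disk_total - disk_free) / disk_total) * 100)   # 75.0, i.e. 75% used

The display_unit and max_limit properties then presumably drive how the UI labels and caps the heatmap scale.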

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host CPU Wait IO %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 25e8d04..4baca5c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1894,14 +1894,6 @@ public class AmbariMetaInfoTest {
     Assert.assertTrue(descriptor.getService("NEW_SERVICE").shouldPreconfigure());
   }
 
-  @Test
-  public void testGetCommonWidgetsFile() throws AmbariException {
-    File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
-
-    Assert.assertNotNull(widgetsFile);
-    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
-  }
-
   private File getStackRootTmp(String buildDir) {
     return new File(buildDir + "/ambari-metaInfo");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 9547271..a02690f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2367,14 +2367,18 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
     expect(service.getDesiredStackId()).andReturn(stackId).atLeastOnce();
+    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
+        .put("HDFS", service)
+        .build());
 
     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
 
 
     StackInfo stackInfo = createNiceMock(StackInfo.class);
+    expect(stackInfo.getWidgetsDescriptorFileLocation()).andReturn(null).once();
 
     expect(ambariMetaInfo.getStack("HDP", "2.1")).andReturn(stackInfo).atLeastOnce();
-    expect(ambariMetaInfo.getCommonWidgetsDescriptorFile()).andReturn(null).once();
+    expect(ambariMetaInfo.getStack(stackId)).andReturn(stackInfo).atLeastOnce();
 
     replay(injector, clusters, ambariMetaInfo, stackInfo, cluster, service, repoVersionDAO, repoVersion);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 7094caa..b370829 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -34,7 +34,6 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.StringReader;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
@@ -10425,11 +10424,6 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("UPDATED_BLOCKED_TIME", layoutUserWidgetEntities.get(3).getWidget().getWidgetName());
     Assert.assertEquals("HBASE_SUMMARY", layoutUserWidgetEntities.get(0).getWidget().getDefaultSectionName());
 
-    File widgetsFile  = ambariMetaInfo.getCommonWidgetsDescriptorFile();
-    assertNotNull(widgetsFile);
-    assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
-    assertTrue(widgetsFile.exists());
-
     candidateLayoutEntity = null;
     for (WidgetLayoutEntity entity : layoutEntities) {
       if (entity.getLayoutName().equals("default_system_heatmap")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/b358ee20/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
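
The "values" entries above are arithmetic templates over the metrics declared
beside them: the ${...} body is evaluated with each metric name bound to its
reported value, and the result is rendered with the widget's display_unit. A
minimal Python sketch of that evaluation (evaluate_expression is a
hypothetical helper, not Ambari's server-side evaluator):

  def evaluate_expression(expression, metrics):
      # Drop the '${' prefix and '}' suffix, then evaluate the arithmetic
      # with only the metric names in scope.
      body = expression[2:-1]
      return eval(body, {'__builtins__': {}}, dict(metrics))

  metrics = {'mem_total': 16384.0, 'mem_free': 4096.0, 'mem_cached': 2048.0}
  print(evaluate_expression(
      '${((mem_total-mem_free-mem_cached)/mem_total)*100}', metrics))
  # 62.5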


[04/50] [abbrv] ambari git commit: AMBARI-22116. You should not be able to 'Ignore and Proceed' to finalize (alexantonenko)

Posted by jl...@apache.org.
AMBARI-22116. You should not be able to 'Ignore and Proceed' to finalize (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/05c7067a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/05c7067a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/05c7067a

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 05c7067a3560eebc257213691c0a947c9626f8f5
Parents: b4c8e84
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Tue Oct 3 15:03:08 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Tue Oct 3 15:03:08 2017 +0300

----------------------------------------------------------------------
 .../stack_upgrade/stack_upgrade_wizard.hbs      |  2 +-
 .../admin/stack_upgrade/upgrade_wizard_view.js  | 12 +++++++++++
 .../stack_upgrade/upgrade_wizard_view_test.js   | 22 ++++++++++++++++++++
 3 files changed, 35 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/05c7067a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
index 7ab43b4..332594e 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
@@ -123,7 +123,7 @@
                               <button
                                 class="btn btn-danger" {{bindAttr disabled="controller.requestInProgress"}} {{action confirmDowngrade view.failedItem target="controller"}}>{{t common.downgrade}}</button>
                             {{/if}}
-                            {{#if view.failedItem.skippable}}
+                            {{#if view.canSkipFailedItem}}
                               <button
                                 class="btn btn-warning" {{bindAttr disabled="controller.requestInProgress"}} {{action continue view.failedItem target="view"}}>{{t admin.stackUpgrade.dialog.continue}}</button>
                             {{/if}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/05c7067a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
index 89c54ce..e1689c2 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_wizard_view.js
@@ -118,6 +118,18 @@ App.upgradeWizardView = Em.View.extend({
   }.property('activeGroup.upgradeItems.@each.status'),
 
   /**
+   * whether the failed item can be skipped
+   * @type {boolean}
+   */
+  canSkipFailedItem: function () {
+    var failedItem = this.get('failedItem');
+    var associatedVersion = this.get('controller.upgradeData.Upgrade.associated_version');
+    var version = associatedVersion && App.RepositoryVersion.find().findProperty('repositoryVersion', associatedVersion);
+    var isPatchOrMaint = version && ( version.get('isPatch') || version.get('isMaint') );
+    return failedItem && failedItem.get('skippable') && !isPatchOrMaint;
+  }.property('failedItem'),
+
+  /**
    * upgrade doesn't have any failed or manual or running item
    * @type {boolean}
    */

http://git-wip-us.apache.org/repos/asf/ambari/blob/05c7067a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
index a303e60..0107975 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_wizard_view_test.js
@@ -915,4 +915,26 @@ describe('App.upgradeWizardView', function () {
     });
   });
 
+  describe("#canSkipFailedItem()", function() {
+    beforeEach(function () {
+      view.reopen({'failedItem': Em.Object.create({skippable: true}) });
+      view.set('controller.upgradeData.Upgrade', {associated_version: '2.1.1'});
+    });
+    it("Should return true if the repository version cannot be found", function () {
+      view.propertyDidChange('canSkipFailedItem');
+      expect(view.get('canSkipFailedItem')).to.be.true;
+    });
+
+    it("Should return false if upgrade is patch or maint", function () {
+      var findResult = [Em.Object.create({repositoryVersion: '2.1.1', isPatch: true})];
+      sinon.stub(App.RepositoryVersion, 'find', function(){
+        return findResult;
+      });
+      view.propertyDidChange('canSkipFailedItem');
+      expect(view.get('canSkipFailedItem')).to.be.false;
+      App.RepositoryVersion.find.restore();
+    });
+
+  });
+
 });


[35/50] [abbrv] ambari git commit: AMBARI-22148 Style changes for service summary. (atkach)

Posted by jl...@apache.org.
AMBARI-22148 Style changes for service summary. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3f002525
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3f002525
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3f002525

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 3f00252534e2dd4b08bd5d308757070fe849cba2
Parents: 0f32765
Author: Andrii Tkach <at...@apache.org>
Authored: Thu Oct 5 17:56:44 2017 +0300
Committer: Andrii Tkach <at...@apache.org>
Committed: Fri Oct 6 12:50:49 2017 +0300

----------------------------------------------------------------------
 ambari-web/app/messages.js                      |  6 ++--
 ambari-web/app/styles/application.less          |  2 +-
 .../app/templates/main/service/info/summary.hbs | 16 ++++++++-
 .../templates/main/service/services/hdfs.hbs    | 38 ++++++++++++--------
 .../app/views/main/service/info/summary.js      |  8 +++++
 .../app/views/main/service/services/hdfs.js     |  2 --
 6 files changed, 51 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3f002525/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index d0c7e39..7cde3d1 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -357,6 +357,7 @@ Em.I18n.translations = {
   'common.repositoryType': 'Repository Type',
   'common.rolling.downgrade': 'Rolling Downgrade',
   'common.express.downgrade': 'Express Downgrade',
+  'common.views': 'Views',
 
   'models.alert_instance.tiggered.verbose': "Occurred on {0} <br> Checked on {1}",
   'models.alert_definition.triggered.verbose': "Occurred on {0}",
@@ -2961,8 +2962,9 @@ Em.I18n.translations = {
   'dashboard.services.hdfs.nodes.heapUsed':'{0} / {1}',
 
   'dashboard.services.hdfs.chart.label':'Capacity (Used/Total)',
-  'dashboard.services.hdfs.blockErrors':'{0} / {1} / {2}',
-  'dashboard.services.hdfs.blockErrorsDesc':'corrupt replica / missing / under replicated',
+  'dashboard.services.hdfs.blockErrors.corrupt': 'corrupt replica',
+  'dashboard.services.hdfs.blockErrors.replicated': 'under replicated',
+  'dashboard.services.hdfs.blockErrors.missing': 'missing',
   'dashboard.services.hdfs.datanode.status.tooltip.live': 'This is the number of DataNodes that are live as reported from ' +
     'the NameNode. Even if a DataNode process is up, NameNode might see the status as dead ' +
    'if the DataNode is not communicating with the NameNode as expected. This can be due to situations ' +

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f002525/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 8181996..7c9cc8e 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -1083,7 +1083,7 @@ a:focus {
   .col-md-3 {
     min-height: 100px;
     margin-left: 2px;
-    min-width: 150px;
+    min-width: 180px;
   }
   .summary-label {
     font-size: 12px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f002525/ambari-web/app/templates/main/service/info/summary.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/info/summary.hbs b/ambari-web/app/templates/main/service/info/summary.hbs
index b1b0a42..075cae0 100644
--- a/ambari-web/app/templates/main/service/info/summary.hbs
+++ b/ambari-web/app/templates/main/service/info/summary.hbs
@@ -57,8 +57,8 @@
           <div class="col-md-4 col-lg-4 service-alerts">
             {{#if view.hasAlertDefinitions}}
               <span {{action "showServiceAlertsPopup" controller.content target="controller"}} class="pull-right">
-                <i class="glyphicon glyphicon-bell"></i>
                 {{#if view.alertsCount}}
+                  <i class="glyphicon glyphicon-bell"></i>
                   <span {{bindAttr class=":label view.hasCriticalAlerts:alerts-crit-count:alerts-warn-count"}}>
                     {{view.alertsCount}}
                   </span>
@@ -118,6 +118,20 @@
           {{/if}}
         </div>
       {{/view}}
+      <div class="panel-heading">
+         <div class="row col-md-8 col-lg-12">
+           <h4 class="panel-title">{{t common.views}}</h4>
+         </div>
+      </div>
+      <div class="panel-body">
+       {{#if view.views.length}}
+         {{#each item in view.views}}
+           <a href="#" {{action "setView" item target="App.router.mainViewsController"}}>{{item.label}}</a>
+         {{/each}}
+       {{else}}
+         <span>{{t menu.item.views.noViews}}</span>
+       {{/if}}
+      </div>
     </div>
   </div>
  {{! widgets in the metrics panel are loaded separately from summary page text information

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f002525/ambari-web/app/templates/main/service/services/hdfs.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/service/services/hdfs.hbs b/ambari-web/app/templates/main/service/services/hdfs.hbs
index 967239d..fec5bac 100644
--- a/ambari-web/app/templates/main/service/services/hdfs.hbs
+++ b/ambari-web/app/templates/main/service/services/hdfs.hbs
@@ -64,17 +64,18 @@
             </div>
           {{else}}
             <div class="main-info">
-              <span {{translateAttr data-original-title="dashboard.services.hdfs.datanode.status.tooltip.live" }}
-                      rel="tooltip">{{view.service.liveDataNodes.length}} </span> /
-              <span {{translateAttr data-original-title="dashboard.services.hdfs.datanode.status.tooltip.dead" }}
-                      rel="tooltip">{{view.service.deadDataNodes.length}} </span> /
-              <span {{translateAttr data-original-title="dashboard.services.hdfs.datanode.status.tooltip.decommission" }}
-                      rel="tooltip">{{view.service.decommissionDataNodes.length}} </span>
-            </div>
-            <div class="info-desc">
-              <span {{t dashboard.services.hdfs.nodes.live}} </span> /
-              <span {{t dashboard.services.hdfs.nodes.dead}} </span> /
-              <span {{t dashboard.services.hdfs.nodes.decom}} </span>
+              <p {{translateAttr data-original-title="dashboard.services.hdfs.datanode.status.tooltip.live" }}
+                      rel="tooltip">
+                 {{view.service.liveDataNodes.length}}&nbsp;{{t dashboard.services.hdfs.nodes.live}}
+              </p>
+              <p {{translateAttr data-original-title="dashboard.services.hdfs.datanode.status.tooltip.dead" }}
+                      rel="tooltip">
+                 {{view.service.deadDataNodes.length}}&nbsp;{{t dashboard.services.hdfs.nodes.dead}}
+              </p>
+              <p {{translateAttr data-original-title="dashboard.services.hdfs.datanode.status.tooltip.decommission" }}
+                      rel="tooltip">
+                {{view.service.decommissionDataNodes.length}}&nbsp;{{t dashboard.services.hdfs.nodes.decom}}
+              </p>
             </div>
           {{/if}}
         </div>
@@ -156,10 +157,17 @@
         <div class="summary-label">{{t services.service.summary.blocksTotal}}</div>
       </div>
       {{! Block Errors }}
-      <div class="row block-errors col-md-3">
-        <div class="summary-value">
-          <div class="main-info">{{view.blockErrorsMessage}}</div>
-          <div class="info-desc">{{t dashboard.services.hdfs.blockErrorsDesc}}</div>
+      <div class="row block-errors col-md-3 summary-value">
+        <div class="main-info">
+          <p>
+            {{view.dfsCorruptBlocks}}&nbsp;{{t dashboard.services.hdfs.blockErrors.corrupt}}
+          </p>
+          <p>
+            {{view.dfsMissingBlocks}}&nbsp;{{t dashboard.services.hdfs.blockErrors.missing}}
+          </p>
+          <p>
+            {{view.dfsUnderReplicatedBlocks}}&nbsp;{{t dashboard.services.hdfs.blockErrors.replicated}}
+          </p>
         </div>
         <div class="summary-label">{{t services.service.summary.blockErrors}}</div>
       </div>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f002525/ambari-web/app/views/main/service/info/summary.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/info/summary.js b/ambari-web/app/views/main/service/info/summary.js
index 69e339a..91b2ca3 100644
--- a/ambari-web/app/views/main/service/info/summary.js
+++ b/ambari-web/app/views/main/service/info/summary.js
@@ -61,6 +61,14 @@ App.MainServiceInfoSummaryView = Em.View.extend(App.Persist, App.TimeRangeMixin,
    *  <code>loadServiceSummary()</code>
    */
   serviceSummaryView: null,
+
+  /**
+   * @type {App.ViewInstance}
+   */
+  views: function () {
+    return App.router.get('loggedIn') ? App.router.get('mainViewsController.visibleAmbariViews') : [];
+  }.property('App.router.mainViewsController.visibleAmbariViews.[]', 'App.router.loggedIn'),
+
   /**
    * @property {Object} serviceCustomViewsMap - custom views to embed
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/3f002525/ambari-web/app/views/main/service/services/hdfs.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/service/services/hdfs.js b/ambari-web/app/views/main/service/services/hdfs.js
index e0580f1..8f6ba6f 100644
--- a/ambari-web/app/views/main/service/services/hdfs.js
+++ b/ambari-web/app/views/main/service/services/hdfs.js
@@ -111,8 +111,6 @@ App.MainDashboardServiceHdfsView = App.MainDashboardServiceView.extend({
 
   dfsUnderReplicatedBlocks: Em.computed.formatUnavailable('service.dfsUnderReplicatedBlocks'),
 
-  blockErrorsMessage: Em.computed.i18nFormat('dashboard.services.hdfs.blockErrors', 'dfsCorruptBlocks', 'dfsMissingBlocks', 'dfsUnderReplicatedBlocks'),
-
   nodeUptime: function () {
     var uptime = this.get('service.nameNodeStartTime');
     if (uptime && uptime > 0){


[03/50] [abbrv] ambari git commit: AMBARI-22113. Unit test fails with Python 2.6

Posted by jl...@apache.org.
AMBARI-22113. Unit test fails with Python 2.6


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b4c8e840
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b4c8e840
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b4c8e840

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b4c8e8409edcd01c70c15453a03d59248382f515
Parents: 03696f1
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Mon Oct 2 12:23:40 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Tue Oct 3 11:48:55 2017 +0200

----------------------------------------------------------------------
 .../libraries/functions/conf_select.py          |  4 +--
 .../src/test/python/TestUpgradeSummary.py       |  6 ++--
 .../HIVE/test_jdbc_driver_config.py             | 18 +++++++----
 .../RANGER/test_db_flavor_config.py             | 17 ++++++++--
 .../RANGER_KMS/test_db_flavor_config.py         | 17 ++++++++--
 .../SQOOP/test_jdbc_driver_config.py            | 16 +++++++--
 .../stacks/2.3/common/test_stack_advisor.py     |  2 +-
 .../src/test/python/stacks/utils/RMFTestCase.py | 34 ++++++++++----------
 8 files changed, 76 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b4c8e840/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index 86821bf..f330f39 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -310,7 +310,7 @@ def convert_conf_directories_to_symlinks(package, version, dirs, skip_existing_l
     old_conf = dir_def['conf_dir']
     backup_dir = _get_backup_conf_directory(old_conf)
     Logger.info("Backing up {0} to {1} if destination doesn't exist already.".format(old_conf, backup_dir))
-    Execute(("cp", "-R", "-p", old_conf, backup_dir),
+    Execute(("cp", "-R", "-p", unicode(old_conf), unicode(backup_dir)),
       not_if = format("test -e {backup_dir}"), sudo = True)
 
   # we're already in the HDP stack
@@ -460,4 +460,4 @@ def _get_backup_conf_directory(old_conf):
   """
   old_parent = os.path.abspath(os.path.join(old_conf, os.pardir))
   backup_dir = os.path.join(old_parent, "conf.backup")
-  return backup_dir
\ No newline at end of file
+  return backup_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4c8e840/ambari-server/src/test/python/TestUpgradeSummary.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestUpgradeSummary.py b/ambari-server/src/test/python/TestUpgradeSummary.py
index 7606867..6a793e4 100644
--- a/ambari-server/src/test/python/TestUpgradeSummary.py
+++ b/ambari-server/src/test/python/TestUpgradeSummary.py
@@ -49,7 +49,7 @@ class TestUpgradeSummary(TestCase):
     self.assertEqual("2.4.0.0-1234", upgrade_summary.get_source_version("HDFS"))
     self.assertEqual("2.5.9.9-9999", upgrade_summary.get_target_version("HDFS"))
 
-    self.assertIsNone(upgrade_summary.get_downgrade_from_version("HDFS"))
+    self.assertTrue(upgrade_summary.get_downgrade_from_version("HDFS") is None)
 
 
   def test_get_downgrade_from_version(self):
@@ -60,7 +60,7 @@ class TestUpgradeSummary(TestCase):
     command_json = TestUpgradeSummary._get_cluster_simple_downgrade_json()
     Script.config = command_json
 
-    self.assertIsNone(upgrade_summary.get_downgrade_from_version("FOO"))
+    self.assertTrue(upgrade_summary.get_downgrade_from_version("FOO") is None)
     self.assertEqual("2.5.9.9-9999", upgrade_summary.get_downgrade_from_version("HDFS"))
 
 
@@ -134,4 +134,4 @@ class TestUpgradeSummary(TestCase):
         "isRevert":False,
         "orchestration":"STANDARD"
       }
-    }
\ No newline at end of file
+    }
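
The assertIsNone() calls above had to be replaced because that assertion was
only added in Python 2.7's unittest; on 2.6 the equivalent check is spelled
with assertTrue(). A self-contained sketch of the 2.6-safe form:

  import unittest

  class Example(unittest.TestCase):
      def test_downgrade_version_absent(self):
          value = None
          # assertIsNone(value) requires Python >= 2.7; this identity
          # check is the 2.6-compatible equivalent.
          self.assertTrue(value is None)

  if __name__ == '__main__':
      unittest.main()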

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4c8e840/ambari-server/src/test/python/common-services/HIVE/test_jdbc_driver_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/HIVE/test_jdbc_driver_config.py b/ambari-server/src/test/python/common-services/HIVE/test_jdbc_driver_config.py
index e4d81b1..98072f9 100644
--- a/ambari-server/src/test/python/common-services/HIVE/test_jdbc_driver_config.py
+++ b/ambari-server/src/test/python/common-services/HIVE/test_jdbc_driver_config.py
@@ -36,13 +36,18 @@ class TestJdbcDriverConfig(RMFTestCase):
                        config_file=os.path.join(self.CONFIG_DIR, "hive_default.json"))
 
   def test_unsupported_jdbc_type_throws_error_0_12_0_2_0(self):
-    with self.assertRaises(Fail):
+    try:
       self.executeScript("HIVE/0.12.0.2.0/package/scripts/hive_server.py",
                        classname="HiveServer",
                        command="configure",
                        target=RMFTestCase.TARGET_COMMON_SERVICES,
                        stack_version=self.STACK_VERSION,
                        config_file=os.path.join(self.CONFIG_DIR, "hive_unsupported_jdbc_type.json"))
+      self.fail("Expected 'Fail', but call completed without throwing")
+    except Fail as e:
+      pass
+    except Exception as e:
+      self.fail("Expected 'Fail', got {}".format(e))
 
   def test_jdbc_type_2_1_0_3_0(self):
     self.executeScript("HIVE/2.1.0.3.0/package/scripts/hive_server.py",
@@ -53,14 +58,15 @@ class TestJdbcDriverConfig(RMFTestCase):
                        config_file=os.path.join(self.CONFIG_DIR, "hive_default.json"))
 
   def test_unsupported_jdbc_type_throws_error_2_1_0_3_0(self):
-    with self.assertRaises(Fail):
+    try:
       self.executeScript("HIVE/2.1.0.3.0/package/scripts/hive_server.py",
                          classname="HiveServer",
                          command="configure",
                          target=RMFTestCase.TARGET_COMMON_SERVICES,
                          stack_version=self.STACK_VERSION,
                          config_file=os.path.join(self.CONFIG_DIR, "hive_unsupported_jdbc_type.json"))
-
-
-
-
+      self.fail("Expected 'Fail', but call completed without throwing")
+    except Fail as e:
+      pass
+    except Exception as e:
+      self.fail("Expected 'Fail', got {}".format(e))

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4c8e840/ambari-server/src/test/python/common-services/RANGER/test_db_flavor_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/RANGER/test_db_flavor_config.py b/ambari-server/src/test/python/common-services/RANGER/test_db_flavor_config.py
index 568e3fd..ac626d3 100644
--- a/ambari-server/src/test/python/common-services/RANGER/test_db_flavor_config.py
+++ b/ambari-server/src/test/python/common-services/RANGER/test_db_flavor_config.py
@@ -37,13 +37,19 @@ class TestDbFlavorConfig(RMFTestCase):
                        config_file=os.path.join(self.CONFIG_DIR, "ranger_admin_default.json"))
 
   def test_unsupported_db_flavor_0_4_0(self):
-    with self.assertRaises(Fail):
+    try:
       self.executeScript("RANGER/0.4.0/package/scripts/ranger_admin.py",
                        classname="RangerAdmin",
                        command="configure",
                        target=RMFTestCase.TARGET_COMMON_SERVICES,
                        stack_version=self.STACK_VERSION,
                        config_file=os.path.join(self.CONFIG_DIR, "ranger_admin_unsupported_db_flavor.json"))
+      self.fail("Expected 'Fail', but call completed without throwing")
+    except Fail as e:
+      pass
+    except Exception as e:
+      self.fail("Expected 'Fail', got {}".format(e))
+
 
   def test_db_flavor_1_0_0_3_0(self):
     self.executeScript("RANGER/1.0.0.3.0/package/scripts/ranger_admin.py",
@@ -54,10 +60,15 @@ class TestDbFlavorConfig(RMFTestCase):
                        config_file=os.path.join(self.CONFIG_DIR, "ranger_admin_default.json"))
 
   def test_unsupported_db_flavor_1_0_0_3_0(self):
-    with self.assertRaises(Fail):
+    try:
       self.executeScript("RANGER/1.0.0.3.0/package/scripts/ranger_admin.py",
                          classname="RangerAdmin",
                          command="configure",
                          target=RMFTestCase.TARGET_COMMON_SERVICES,
                          stack_version=self.STACK_VERSION,
-                         config_file=os.path.join(self.CONFIG_DIR, "ranger_admin_unsupported_db_flavor.json"))
\ No newline at end of file
+                         config_file=os.path.join(self.CONFIG_DIR, "ranger_admin_unsupported_db_flavor.json"))
+      self.fail("Expected 'Fail', but call completed without throwing")
+    except Fail as e:
+      pass
+    except Exception as e:
+      self.fail("Expected 'Fail', got {}".format(e))

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4c8e840/ambari-server/src/test/python/common-services/RANGER_KMS/test_db_flavor_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/RANGER_KMS/test_db_flavor_config.py b/ambari-server/src/test/python/common-services/RANGER_KMS/test_db_flavor_config.py
index 48654ee..16d271c 100644
--- a/ambari-server/src/test/python/common-services/RANGER_KMS/test_db_flavor_config.py
+++ b/ambari-server/src/test/python/common-services/RANGER_KMS/test_db_flavor_config.py
@@ -37,13 +37,19 @@ class TestDbFlavorConfig(RMFTestCase):
                        config_file=os.path.join(self.CONFIG_DIR, "ranger_kms_default.json"))
 
   def test_unsupported_db_flavor_0_5_0_2_3(self):
-    with self.assertRaises(Fail):
+    try:
       self.executeScript("RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py",
                        classname="KmsServer",
                        command="configure",
                        target=RMFTestCase.TARGET_COMMON_SERVICES,
                        stack_version=self.STACK_VERSION,
                        config_file=os.path.join(self.CONFIG_DIR, "ranger_kms_unsupported_db_flavor.json"))
+      self.fail("Expected 'Fail', but call completed without throwing")
+    except Fail as e:
+      pass
+    except Exception as e:
+      self.fail("Expected 'Fail', got {}".format(e))
+
 
   def test_db_flavor_1_0_0_3_0(self):
     self.executeScript("RANGER_KMS/1.0.0.3.0/package/scripts/kms_server.py",
@@ -54,10 +60,15 @@ class TestDbFlavorConfig(RMFTestCase):
                        config_file=os.path.join(self.CONFIG_DIR, "ranger_kms_default.json"))
 
   def test_unsupported_db_flavor_1_0_0_3_0(self):
-    with self.assertRaises(Fail):
+    try:
       self.executeScript("RANGER_KMS/1.0.0.3.0/package/scripts/kms_server.py",
                          classname="KmsServer",
                          command="configure",
                          target=RMFTestCase.TARGET_COMMON_SERVICES,
                          stack_version=self.STACK_VERSION,
-                         config_file=os.path.join(self.CONFIG_DIR, "ranger_kms_unsupported_db_flavor.json"))
\ No newline at end of file
+                         config_file=os.path.join(self.CONFIG_DIR, "ranger_kms_unsupported_db_flavor.json"))
+      self.fail("Expected 'Fail', but call completed without throwing")
+    except Fail as e:
+      pass
+    except Exception as e:
+      self.fail("Expected 'Fail', got {}".format(e))

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4c8e840/ambari-server/src/test/python/common-services/SQOOP/test_jdbc_driver_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/common-services/SQOOP/test_jdbc_driver_config.py b/ambari-server/src/test/python/common-services/SQOOP/test_jdbc_driver_config.py
index 7bb809a..066d526 100644
--- a/ambari-server/src/test/python/common-services/SQOOP/test_jdbc_driver_config.py
+++ b/ambari-server/src/test/python/common-services/SQOOP/test_jdbc_driver_config.py
@@ -37,13 +37,18 @@ class TestJdbcDriverConfig(RMFTestCase):
                        config_file=os.path.join(self.CONFIG_DIR, "sqoop_default.json"))
 
   def test_unsupported_jdbc_driver_1_4_4_2_0(self):
-    with self.assertRaises(Fail):
+    try:
       self.executeScript("SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py",
                        classname="SqoopClient",
                        command="configure",
                        target=RMFTestCase.TARGET_COMMON_SERVICES,
                        stack_version=self.STACK_VERSION,
                        config_file=os.path.join(self.CONFIG_DIR, "sqoop_unsupported_jdbc_driver.json"))
+      self.fail("Expected 'Fail', but call completed without throwing")
+    except Fail as e:
+      pass
+    except Exception as e:
+      self.fail("Expected 'Fail', got {}".format(e))
 
   def test_jdbc_driver_1_4_4_3_0(self):
     self.executeScript("SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py",
@@ -54,10 +59,15 @@ class TestJdbcDriverConfig(RMFTestCase):
                        config_file=os.path.join(self.CONFIG_DIR, "sqoop_default.json"))
 
   def test_unsupported_jdbc_driver_1_4_4_3_0(self):
-    with self.assertRaises(Fail):
+    try:
       self.executeScript("SQOOP/1.4.4.3.0/package/scripts/sqoop_client.py",
                          classname="SqoopClient",
                          command="configure",
                          target=RMFTestCase.TARGET_COMMON_SERVICES,
                          stack_version=self.STACK_VERSION,
-                         config_file=os.path.join(self.CONFIG_DIR, "sqoop_unsupported_jdbc_driver.json"))
\ No newline at end of file
+                         config_file=os.path.join(self.CONFIG_DIR, "sqoop_unsupported_jdbc_driver.json"))
+      self.fail("Expected 'Fail', but call completed without throwing")
+    except Fail as e:
+      pass
+    except Exception as e:
+      self.fail("Expected 'Fail', got {}".format(e))

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4c8e840/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 0cc9ad0..2112fa0 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -28,7 +28,7 @@ class TestHDP23StackAdvisor(TestCase):
   def setUp(self):
     import imp
     self.maxDiff = None
-    unittest.util._MAX_LENGTH=2000
+    if 'util' in dir(unittest): unittest.util._MAX_LENGTH=2000
     self.testDirectory = os.path.dirname(os.path.abspath(__file__))
     stackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
     hdp206StackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4c8e840/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index 291f22b..81ac262 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -91,7 +91,7 @@ class RMFTestCase(TestCase):
     elif config_dict is not None and config_file is None:
       self.config_dict = config_dict
     else:
-      raise RuntimeError("Please specify either config_file_path or config_dict parameter")
+      raise RuntimeError("Please specify either config_file or config_dict parameter")
 
     # add the stack tools & features from the stack if the test case's JSON file didn't have them
     if "stack_tools" not in self.config_dict["configurations"]["cluster-env"]:
@@ -139,22 +139,22 @@ class RMFTestCase(TestCase):
     if 'status_params' in sys.modules:
       del(sys.modules["status_params"])
 
-    with Environment(basedir, test_mode=True) as RMFTestCase.env,\
-        patch('resource_management.core.shell.checked_call', side_effect=checked_call_mocks) as mocks_dict['checked_call'],\
-        patch('resource_management.core.shell.call', side_effect=call_mocks) as mocks_dict['call'],\
-        patch.object(Script, 'get_config', return_value=self.config_dict) as mocks_dict['get_config'],\
-        patch.object(Script, 'get_tmp_dir', return_value="/tmp") as mocks_dict['get_tmp_dir'],\
-        patch.object(Script, 'post_start') as mocks_dict['post_start'],\
-        patch('resource_management.libraries.functions.get_kinit_path', return_value=kinit_path_local) as mocks_dict['get_kinit_path'],\
-        patch.object(platform, 'linux_distribution', return_value=os_type) as mocks_dict['linux_distribution'],\
-        patch('resource_management.libraries.functions.stack_select.is_package_supported', return_value=True),\
-        patch('resource_management.libraries.functions.stack_select.get_supported_packages', return_value=MagicMock()),\
-        patch.object(os, "environ", new=os_env) as mocks_dict['environ']:
-      if not try_install:
-        with patch.object(Script, 'install_packages') as install_mock_value:
-          method(RMFTestCase.env, *command_args)
-      else:
-        method(RMFTestCase.env, *command_args)
+    with Environment(basedir, test_mode=True) as RMFTestCase.env:
+      with patch('resource_management.core.shell.checked_call', side_effect=checked_call_mocks) as mocks_dict['checked_call']:
+        with patch('resource_management.core.shell.call', side_effect=call_mocks) as mocks_dict['call']:
+          with patch.object(Script, 'get_config', return_value=self.config_dict) as mocks_dict['get_config']:
+            with patch.object(Script, 'get_tmp_dir', return_value="/tmp") as mocks_dict['get_tmp_dir']:
+              with patch.object(Script, 'post_start') as mocks_dict['post_start']:
+                with patch('resource_management.libraries.functions.get_kinit_path', return_value=kinit_path_local) as mocks_dict['get_kinit_path']:
+                  with patch.object(platform, 'linux_distribution', return_value=os_type) as mocks_dict['linux_distribution']:
+                    with patch('resource_management.libraries.functions.stack_select.is_package_supported', return_value=True):
+                      with patch('resource_management.libraries.functions.stack_select.get_supported_packages', return_value=MagicMock()):
+                        with patch.object(os, "environ", new=os_env) as mocks_dict['environ']:
+                          if not try_install:
+                            with patch.object(Script, 'install_packages') as install_mock_value:
+                              method(RMFTestCase.env, *command_args)
+                          else:
+                            method(RMFTestCase.env, *command_args)
 
     sys.path.remove(scriptsdir)
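
The deeply nested rewrite above is also a 2.6 constraint: a single with
statement taking multiple comma-separated context managers is Python 2.7+
syntax, so on 2.6 each manager needs its own with block (or contextlib.nested,
which 2.6 ships). A standalone sketch of both 2.6-safe spellings, independent
of the Ambari mocks:

  from contextlib import nested  # present on 2.6, deprecated from 2.7

  # Explicit nesting, as RMFTestCase.py now does:
  with open('/tmp/a.txt', 'w') as a:
      with open('/tmp/b.txt', 'w') as b:
          a.write('a')
          b.write('b')

  # Equivalent via contextlib.nested:
  with nested(open('/tmp/a.txt'), open('/tmp/b.txt')) as (a, b):
      print(a.read() + b.read())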
 


[48/50] [abbrv] ambari git commit: AMBARI-21776. Move druid version to druid 0.10.1 and drop TP flag. (Slim Bouguerra via Swapan Shridhar).

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/params.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/params.py
new file mode 100644
index 0000000..fd1cde6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/params.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.default import default
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+import status_params
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'DRUID_BROKER': 'druid-broker',
+  'DRUID_COORDINATOR': 'druid-coordinator',
+  'DRUID_HISTORICAL': 'druid-historical',
+  'DRUID_MIDDLEMANAGER': 'druid-middlemanager',
+  'DRUID_OVERLORD': 'druid-overlord',
+  'DRUID_ROUTER': 'druid-router'
+}
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+# stack version
+stack_version = default("/commandParams/version", None)
+
+# un-formatted stack version
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+
+# default role to coordinator needed for service checks
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "DRUID_COORDINATOR")
+
+hostname = config['hostname']
+sudo = AMBARI_SUDO_BINARY
+
+# default druid parameters
+druid_home = format("{stack_root}/current/{component_directory}")
+druid_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+druid_common_conf_dir = druid_conf_dir + "/_common"
+druid_coordinator_conf_dir = druid_conf_dir + "/coordinator"
+druid_overlord_conf_dir = druid_conf_dir + "/overlord"
+druid_broker_conf_dir = druid_conf_dir + "/broker"
+druid_historical_conf_dir = druid_conf_dir + "/historical"
+druid_middlemanager_conf_dir = druid_conf_dir + "/middleManager"
+druid_router_conf_dir = druid_conf_dir + "/router"
+druid_extensions_dir = druid_home + "/extensions"
+druid_hadoop_dependencies_dir = druid_home + "/hadoop-dependencies"
+druid_segment_infoDir = config['configurations']['druid-historical']['druid.segmentCache.infoDir']
+druid_segment_cache_locations = config['configurations']['druid-historical']['druid.segmentCache.locations']
+druid_tasks_dir = config['configurations']['druid-middlemanager']['druid.indexer.task.baseTaskDir']
+druid_user = config['configurations']['druid-env']['druid_user']
+druid_log_dir = config['configurations']['druid-env']['druid_log_dir']
+druid_classpath = config['configurations']['druid-env']['druid_classpath']
+druid_extensions = config['configurations']['druid-common']['druid.extensions.pullList']
+druid_repo_list = config['configurations']['druid-common']['druid.extensions.repositoryList']
+druid_extensions_load_list = config['configurations']['druid-common']['druid.extensions.loadList']
+druid_security_extensions_load_list = config['configurations']['druid-common']['druid.security.extensions.loadList']
+
+
+# status params
+druid_pid_dir = status_params.druid_pid_dir
+user_group = config['configurations']['cluster-env']['user_group']
+java8_home = config['hostLevelParams']['java_home']
+druid_env_sh_template = config['configurations']['druid-env']['content']
+
+# log4j params
+log4j_props = config['configurations']['druid-log4j']['content']
+druid_log_level = config['configurations']['druid-log4j']['druid_log_level']
+metamx_log_level = config['configurations']['druid-log4j']['metamx_log_level']
+root_log_level = config['configurations']['druid-log4j']['root_log_level']
+
+druid_log_maxbackupindex = default('/configurations/druid-logrotate/druid_log_maxbackupindex', 7)
+druid_log_maxfilesize = default('/configurations/druid-logrotate/druid_log_maxfilesize', 256)
+logrotate_props = config['configurations']['druid-logrotate']['content']
+
+# Metadata storage
+metadata_storage_user = config['configurations']['druid-common']['druid.metadata.storage.connector.user']
+metadata_storage_password = config['configurations']['druid-common']['druid.metadata.storage.connector.password']
+metadata_storage_db_name = config['configurations']['druid-common']['database_name']
+metadata_storage_type = config['configurations']['druid-common']['druid.metadata.storage.type']
+metadata_storage_url = config['configurations']['druid-common']['druid.metadata.storage.connector.connectURI']
+jdk_location = config['hostLevelParams']['jdk_location']
+if 'mysql' == metadata_storage_type:
+  jdbc_driver_jar = default("/hostLevelParams/custom_mysql_jdbc_name", None)
+  connector_curl_source = format("{jdk_location}/{jdbc_driver_jar}")
+  connector_download_dir=format("{druid_extensions_dir}/mysql-metadata-storage")
+  downloaded_custom_connector = format("{tmp_dir}/{jdbc_driver_jar}")
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+# HDFS
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST",
+                                                                                                             hostname)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+dfs_type = default("/commandParams/dfs_type", "")
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+
+# Kerberos
+druid_principal_name = default('/configurations/druid-common/druid.hadoop.security.kerberos.principal',
+                               'missing_principal')
+druid_user_keytab = default('/configurations/druid-common/druid.hadoop.security.kerberos.keytab', 'missing_keytab')
+
+import functools
+
+# create partial functions with common arguments for every HdfsResource call
+# to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled=security_enabled,
+  keytab=hdfs_user_keytab,
+  kinit_path_local=kinit_path_local,
+  hadoop_bin_dir=hadoop_bin_dir,
+  hadoop_conf_dir=hadoop_conf_dir,
+  principal_name=hdfs_principal_name,
+  hdfs_site=hdfs_site,
+  default_fs=default_fs,
+  immutable_paths=get_not_managed_resources(),
+  dfs_type=dfs_type
+)
+
+# Ambari Metrics
+metric_emitter_type = "noop"
+metric_collector_host = ""
+metric_collector_port = ""
+metric_collector_protocol = ""
+metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
+metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
+metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
+
+ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
+has_metric_collector = not len(ams_collector_hosts) == 0
+
+if has_metric_collector:
+    metric_emitter_type = "ambari-metrics"
+    if 'cluster-env' in config['configurations'] and \
+                    'metrics_collector_vip_host' in config['configurations']['cluster-env']:
+        metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
+    else:
+        metric_collector_host = ams_collector_hosts[0]
+    if 'cluster-env' in config['configurations'] and \
+                    'metrics_collector_vip_port' in config['configurations']['cluster-env']:
+        metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
+    else:
+        metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "localhost:6188")
+        if metric_collector_web_address.find(':') != -1:
+            metric_collector_port = metric_collector_web_address.split(':')[1]
+        else:
+            metric_collector_port = '6188'
+    if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
+        metric_collector_protocol = 'https'
+    else:
+        metric_collector_protocol = 'http'
+    pass
+
+# Create current Hadoop Clients  Libs
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
+lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
+lzo_packages = get_lzo_packages(stack_version_unformatted)
+hadoop_lib_home = stack_root + '/' + stack_version + '/hadoop/lib'
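
Nearly every path in params.py is built with Ambari's format() helper, which
resolves {placeholders} from the variables in the calling scope rather than
from positional arguments. A rough plain-Python analogue of that behavior
(format_like is a hypothetical stand-in, not the resource_management
implementation):

  import sys
  from string import Formatter

  def format_like(pattern):
      # Resolve placeholders from the caller's variables, the way
      # format("{stack_root}/current/{component_directory}") works above.
      caller_locals = sys._getframe(1).f_locals
      return Formatter().vformat(pattern, (), caller_locals)

  stack_root = '/usr/hdp'
  component_directory = 'druid-coordinator'
  print(format_like('{stack_root}/current/{component_directory}/conf'))
  # /usr/hdp/current/druid-coordinator/conf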

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/router.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/router.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/router.py
new file mode 100644
index 0000000..1731a2a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/router.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidRouter(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="router")
+
+
+if __name__ == "__main__":
+  DruidRouter().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/service_check.py
new file mode 100644
index 0000000..139b727
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/service_check.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    self.checkComponent(params, "druid_coordinator", "druid-coordinator")
+    self.checkComponent(params, "druid_overlord", "druid-overlord")
+
+  def checkComponent(self, params, component_name, config_name):
+    component_port = params.config['configurations'][format('{config_name}')]['druid.port']
+    for component_host in params.config['clusterHostInfo'][format('{component_name}_hosts')]:
+      Execute(format(
+        "curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {component_host}:{component_port}/status | grep 200"),
+        tries=10,
+        try_sleep=3,
+        logoutput=True)
+
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
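
The check shells out to curl with --negotiate and -k so it also works against
Kerberized or SSL-fronted endpoints, but functionally it just polls each
component's /status endpoint until it answers HTTP 200. A plain-Python sketch
of that polling loop (no Kerberos/SSL handling; wait_for_status is a
hypothetical helper):

  import time
  import urllib2  # Python 2, matching the stack scripts

  def wait_for_status(host, port, tries=10, try_sleep=3):
      # Mirror Execute("curl ... /status | grep 200", tries=10, try_sleep=3).
      url = 'http://%s:%s/status' % (host, port)
      for _ in range(tries):
          try:
              if urllib2.urlopen(url, timeout=5).getcode() == 200:
                  return True
          except Exception:
              pass
          time.sleep(try_sleep)
      return False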

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/status_params.py
new file mode 100644
index 0000000..ee1d61c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/package/scripts/status_params.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+druid_pid_dir = config['configurations']['druid-env']['druid_pid_dir']

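status_params.py exists so that status checks can resolve the PID directory without importing the full params module. A sketch of the typical consumer, assuming the Druid control scripts follow Ambari's usual pid-file pattern (the pid file name is illustrative):

    from resource_management.libraries.functions.check_process_status import check_process_status
    from resource_management.libraries.functions.format import format

    def status(self, env):
        import status_params
        env.set_params(status_params)
        # Each daemon writes its own pid file under druid_pid_dir (see above).
        check_process_status(format("{druid_pid_dir}/coordinator.pid"))
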
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/quicklinks/quicklinks.json
new file mode 100644
index 0000000..c68b9b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/quicklinks/quicklinks.json
@@ -0,0 +1,37 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol": {
+      "type": "HTTP_ONLY"
+    },
+    "links": [
+      {
+        "name": "coordinator_console",
+        "label": "Druid Coordinator Console",
+        "component_name": "DRUID_COORDINATOR",
+        "requires_user_name": "false",
+        "url": "%@://%@:%@",
+        "port": {
+          "http_property": "druid.port",
+          "http_default_port": "8081",
+          "regex": "^(\\d+)$",
+          "site": "druid-coordinator"
+        }
+      },
+      {
+        "name": "overlord_console",
+        "label": "Druid Overlord Console",
+        "component_name": "DRUID_OVERLORD",
+        "requires_user_name": "false",
+        "url": "%@://%@:%@",
+        "port": {
+          "http_property": "druid.port",
+          "http_default_port": "8090",
+          "regex": "^(\\d+)$",
+          "site": "druid-overlord"
+        }
+      }
+    ]
+  }
+}

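Each link's %@ placeholders are filled positionally with protocol, host, and port; the port comes from the http_property in the named site, falling back to http_default_port. A sketch of that resolution, assuming the link dict and site configs are already loaded:

    def resolve_quicklink(link, protocol, host, site_configs):
        port_spec = link["port"]
        site = site_configs.get(port_spec["site"], {})
        port = site.get(port_spec["http_property"], port_spec["http_default_port"])
        # port_spec["regex"] can additionally validate the extracted value.
        return link["url"].replace("%@", "{}").format(protocol, host, port)

    # With the coordinator link above and a hypothetical host:
    # resolve_quicklink(link, "http", "c6401.ambari.apache.org", configs)
    # -> "http://c6401.ambari.apache.org:8081"
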
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/role_command_order.json b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/role_command_order.json
new file mode 100644
index 0000000..4d697fe
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/role_command_order.json
@@ -0,0 +1,17 @@
+{
+  "general_deps" : {
+    "_comment" : "dependencies for Druid",
+    "DRUID_HISTORICAL-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_OVERLORD-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_MIDDLEMANAGER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_BROKER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_ROUTER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_COORDINATOR-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_OVERLORD-RESTART" : ["DRUID_HISTORICAL-RESTART"],
+    "DRUID_MIDDLEMANAGER-RESTART" : ["DRUID_OVERLORD-RESTART"],
+    "DRUID_BROKER-RESTART" : ["DRUID_MIDDLEMANAGER-RESTART"],
+    "DRUID_ROUTER-RESTART" : ["DRUID_BROKER-RESTART"],
+    "DRUID_COORDINATOR-RESTART" : ["DRUID_ROUTER-RESTART"],
+    "DRUID_SERVICE_CHECK-SERVICE_CHECK" : ["DRUID_HISTORICAL-START", "DRUID_COORDINATOR-START", "DRUID_OVERLORD-START", "DRUID_MIDDLEMANAGER-START", "DRUID_BROKER-START", "DRUID_ROUTER-START"]
+  }
+}

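Each key in general_deps blocks until every command in its list completes: every Druid daemon start waits on ZooKeeper, HDFS, and YARN, and the restart chain runs historical -> overlord -> middlemanager -> broker -> router -> coordinator. A quick way to inspect the ordering, assuming the file is on disk:

    import json

    with open("role_command_order.json") as f:
        deps = json.load(f)["general_deps"]

    # Commands that must finish before the broker may start:
    print(deps["DRUID_BROKER-START"])
    # ['ZOOKEEPER_SERVER-START', 'NAMENODE-START', 'DATANODE-START',
    #  'RESOURCEMANAGER-START', 'NODEMANAGER-START']
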
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.10.1/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.10.1/themes/theme.json b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/themes/theme.json
new file mode 100644
index 0000000..7033e19
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.10.1/themes/theme.json
@@ -0,0 +1,120 @@
+{
+  "name": "default",
+  "description": "Default theme for Druid service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "metadata_storage",
+            "display-name": "META DATA STORAGE CONFIG",
+            "layout": {
+              "tab-columns": "",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-metadata-storage",
+                  "display-name": "",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "2",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-metadata-storage-row1-col1",
+                      "display-name": "DRUID META DATA STORAGE",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "druid-common/database_name",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.type",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.user",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.password",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/metastore_hostname",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.port",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.connectURI",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "druid-common/database_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.type",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "druid-common/metastore_hostname",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.port",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.connectURI",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
deleted file mode 100644
index 6146ca3..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
+++ /dev/null
@@ -1,106 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/broker</value>
-    <description>The druid.service name of the broker node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8082</value>
-    <description>The port on which the broker will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.http.numConnections</name>
-    <value>20</value>
-    <description>Size of connection pool for the Broker to connect to historical and real-time nodes. If there are more
-      queries than this number that all need to speak to the same node, then they will queue up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>1073741824</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
-      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
-      computations
-      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
-      require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>2</value>
-    <description>The number of processing threads to have available for parallel processing of segments.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numMergeBuffers</name>
-    <value>2</value>
-    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.cache.useCache</name>
-    <value>true</value>
-    <description>Enable the cache on the broker.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.broker.cache.populateCache</name>
-    <value>true</value>
-    <description>Populate the cache on the broker.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.cache.type</name>
-    <value>local</value>
-    <description>The type of cache to use for queries.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.cache.sizeInBytes</name>
-    <value>10000000</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>Maximum cache size in bytes. Zero disables caching.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

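The broker's off-heap requirement follows from the three processing settings above: Druid's sizing guidance is one buffer per processing thread, one per merge buffer, plus one spare, which is why druid-env derives druid.broker.jvm.direct.memory from these properties. Worked with the defaults above:

    buffer_size   = 1073741824  # druid.processing.buffer.sizeBytes
    num_threads   = 2           # druid.processing.numThreads
    merge_buffers = 2           # druid.processing.numMergeBuffers

    required_direct = buffer_size * (num_threads + merge_buffers + 1)
    print(required_direct)      # 5368709120 bytes, i.e. 5 GiB
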
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
deleted file mode 100644
index d3b53cd..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
+++ /dev/null
@@ -1,270 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.extensions.pullList</name>
-    <value>[]</value>
-    <description>A comma-separated list of one or more druid extensions to download from maven.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.extensions.repositoryList</name>
-    <value>[]</value>
-    <description>A comma-separated list of maven repositories to download extensions.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.extensions.loadList</name>
-    <value>["druid-datasketches"]
-    </value>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <description>A comma-separated list of one or more druid extensions to load.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.security.extensions.loadList</name>
-    <value>[]</value>
-    <description>A comma-separated list of one or more Druid security extensions to load. This property is set by the Kerberos wizard; users cannot modify it when security is enabled.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.zk.service.host</name>
-    <value>localhost:2181</value>
-    <description>
-      ZooKeeper connection string.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.zk.paths.base</name>
-    <value>/druid</value>
-    <description>
-      Base ZooKeeper path
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.discovery.curator.path</name>
-    <value>/druid/discovery</value>
-    <description>
-      Services announce themselves under this ZooKeeper path.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.storage.type</name>
-    <value></value>
-    <description>
-      Choices: local, noop, s3, hdfs, c*. The type of deep storage to use.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.storage.storageDirectory</name>
-    <value></value>
-    <description>
-      Directory to use as deep storage.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.password</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <display-name>Metadata storage password</display-name>
-    <description>Password for the metadata storage data base.</description>
-    <value-attributes>
-      <type>password</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.user</name>
-    <value>druid</value>
-    <display-name>Metadata storage user</display-name>
-    <description>Metadata storage user</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.metadata.storage.connector.port</name>
-    <value>1527</value>
-    <display-name>Metadata storage port</display-name>
-    <description>Metadata storage port</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.type</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>database_name</name>
-    <value>druid</value>
-    <display-name>Druid Metadata storage database name</display-name>
-    <description>Druid Metadata storage database name</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>metastore_hostname</name>
-    <value>localhost</value>
-    <display-name>Metadata storage hostname</display-name>
-    <description>Metadata storage hostname</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property require-input="true">
-    <name>druid.metadata.storage.type</name>
-    <display-name>Druid Metadata storage type</display-name>
-    <value>derby</value>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>mysql</value>
-          <label>MYSQL</label>
-        </entry>
-        <entry>
-          <value>derby</value>
-          <label>DERBY</label>
-        </entry>
-        <entry>
-          <value>postgresql</value>
-          <label>POSTGRESQL</label>
-        </entry>
-      </entries>
-    </value-attributes>
-    <description>Type of the metadata storage. Note that Derby works only if all the Druid nodes are located
-      on the same host. Use MySQL or PostgreSQL for distributed mode. The MySQL instance installed by Ambari
-      is intended for development only and is not suitable for production because it is not HA.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property require-input="true">
-    <name>druid.metadata.storage.connector.connectURI</name>
-    <value>jdbc:derby://localhost:1527/druid;create=true</value>
-    <display-name>Metadata storage connector url</display-name>
-    <description>Metadata storage connector url</description>
-    <on-ambari-upgrade add="false"/>
-    <depends-on>
-      <property>
-        <type>druid-common</type>
-        <name>database_name</name>
-      </property>
-      <property>
-        <type>druid-common</type>
-        <name>metastore_hostname</name>
-      </property>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.type</name>
-      </property>
-      <property>
-        <type>druid-common</type>
-        <name>druid.metadata.storage.connector.port</name>
-      </property>
-    </depends-on>
-  </property>
-  <property>
-    <name>druid.hadoop.security.kerberos.principal</name>
-    <display-name>kerberos principal</display-name>
-    <description>Kerberos principal, e.g. druid@EXAMPLE.COM</description>
-    <property-type>KERBEROS_PRINCIPAL</property-type>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.hadoop.security.kerberos.keytab</name>
-    <display-name>Kerberos keytab location</display-name>
-    <description>Kerberos keytab location</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>druid.emitter</name>
-    <value>{{metric_emitter_type}}</value>
-    <description>Emitter used to emit metrics. Each of the values "noop", "logging", "ambari-metrics", or "http"
-      initializes the corresponding emitter module.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.hostname</name>
-    <value>{{metric_collector_host}}</value>
-    <description>Timeline host</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.port</name>
-    <value>{{metric_collector_port}}</value>
-    <description>Timeline port</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.protocol</name>
-    <value>{{metric_collector_protocol}}</value>
-    <description>Timeline protocol (http or https)</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.trustStorePath</name>
-    <value>{{metric_truststore_path}}</value>
-    <description>Location of the trust store file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.trustStoreType</name>
-    <value>{{metric_truststore_type}}</value>
-    <description>Optional. Default value is "jks".</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.trustStorePassword</name>
-    <value>{{metric_truststore_password}}</value>
-    <description>Password to open the trust store file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.emitter.ambari-metrics.eventConverter</name>
-    <value>{"type":"whiteList"}</value>
-    <description>Event converter that selects which metrics are forwarded to Ambari Metrics.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.monitoring.monitors</name>
-    <value>["com.metamx.metrics.JvmMonitor"]</value>
-    <description>List of Druid monitors to load, e.g. JvmMonitor for JVM metrics.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-</configuration>

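The depends-on block above lets Ambari recompute druid.metadata.storage.connector.connectURI whenever the database name, host, port, or storage type changes. A sketch of the derivation; the Derby template matches the default value above, while the MySQL and PostgreSQL templates are the conventional JDBC forms and are assumptions here:

    def connect_uri(storage_type, host, port, db):
        templates = {
            "derby": "jdbc:derby://{0}:{1}/{2};create=true",
            "mysql": "jdbc:mysql://{0}:{1}/{2}?createDatabaseIfNotExist=true",
            "postgresql": "jdbc:postgresql://{0}:{1}/{2}",
        }
        return templates[storage_type].format(host, port, db)

    print(connect_uri("derby", "localhost", 1527, "druid"))
    # jdbc:derby://localhost:1527/druid;create=true  (the default above)
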
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
deleted file mode 100644
index 618f11d..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/coordinator</value>
-    <description>The druid.service name of the coordinator node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8081</value>
-    <description>The port on which the coordinator will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.merge.on</name>
-    <value>false</value>
-    <description>Boolean flag for whether or not the coordinator should try to merge small segments into a more optimal
-      segment size.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
deleted file mode 100644
index 2e96f6a..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
+++ /dev/null
@@ -1,248 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <!--Heap Settings -->
-  <property>
-    <name>druid.broker.jvm.heap.memory</name>
-    <value>2048</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.heap.memory</name>
-    <value>256</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.heap.memory</name>
-    <value>2048</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.heap.memory</name>
-    <value>512</value>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>65536</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- DirectMemorySettings -->
-  <property>
-    <name>druid.broker.jvm.direct.memory</name>
-    <value>1048576</value>
-    <depends-on>
-      <property>
-        <type>druid-broker</type>
-        <name>druid.processing.buffer.sizeBytes</name>
-      </property>
-      <property>
-        <type>druid-broker</type>
-        <name>druid.processing.numThreads</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.direct.memory</name>
-    <value>1048576</value>
-    <depends-on>
-      <property>
-        <type>druid-historical</type>
-        <name>druid.processing.buffer.sizeBytes</name>
-      </property>
-      <property>
-        <type>druid-historical</type>
-        <name>druid.processing.numThreads</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.direct.memory</name>
-    <value>1048576</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- JavaOpts Tune GC related configs here-->
-  <property>
-    <name>druid.broker.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.coordinator.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.middlemanager.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.historical.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.overlord.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.jvm.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_user</name>
-    <display-name>Druid User</display-name>
-    <value>druid</value>
-    <property-type>USER</property-type>
-    <description></description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-      <user-groups>
-        <property>
-          <type>cluster-env</type>
-          <name>user_group</name>
-        </property>
-      </user-groups>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_log_dir</name>
-    <value>/var/log/druid</value>
-    <description></description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_pid_dir</name>
-    <value>/var/run/druid</value>
-    <display-name>Druid PID dir</display-name>
-    <description></description>
-    <value-attributes>
-      <type>directory</type>
-      <editable-only-at-install>true</editable-only-at-install>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <!-- druid-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>druid-env template</display-name>
-    <description>This is a simple template for the druid-env.sh file.</description>
-    <value>
-      #!/bin/bash
-
-      # Set DRUID specific environment variables here.
-
-      # The java implementation to use.
-      export JAVA_HOME={{java8_home}}
-      export PATH=$JAVA_HOME/bin:$PATH
-      export DRUID_PID_DIR={{druid_pid_dir}}
-      export DRUID_LOG_DIR={{druid_log_dir}}
-      export DRUID_CONF_DIR={{druid_conf_dir}}
-      export DRUID_LIB_DIR={{druid_home}}/lib
-      export HADOOP_CONF_DIR={{hadoop_conf_dir}}
-
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

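At deploy time Ambari substitutes the {{...}} placeholders in the template above from the params module. A minimal stand-in for that substitution, with illustrative values (the real paths come from druid-env and the stack definition):

    import re

    template = (
        "export JAVA_HOME={{java8_home}}\n"
        "export DRUID_PID_DIR={{druid_pid_dir}}\n"
        "export DRUID_LOG_DIR={{druid_log_dir}}\n"
    )

    params = {  # illustrative values only
        "java8_home": "/usr/jdk64/jdk1.8.0_112",
        "druid_pid_dir": "/var/run/druid",
        "druid_log_dir": "/var/log/druid",
    }

    print(re.sub(r"\{\{(\w+)\}\}", lambda m: params[m.group(1)], template))
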
http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
deleted file mode 100644
index 5ff30ce..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
+++ /dev/null
@@ -1,94 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/historical</value>
-    <description>The druid.service name of the historical node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8083</value>
-    <description>The port on which the historical nodes will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numMergeBuffers</name>
-    <value>2</value>
-    <description>The number of direct memory buffers available for merging query results. The buffers are sized by druid.processing.buffer.sizeBytes.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>1073741824</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
-      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
-      computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller
-      values can require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>10</value>
-    <description>The number of processing threads to have available for parallel processing of segments.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.segmentCache.locations</name>
-    <value>[{"path":"/apps/druid/segmentCache","maxSize":300000000000}]</value>
-    <description>Segments assigned to a Historical node are first stored on the local file system (in a disk cache) and
-      then served by the Historical node. These locations define where that local cache resides.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.maxSize</name>
-    <value>300000000000</value>
-    <description>The maximum number of bytes-worth of segments that the node wants assigned to it. This is not a limit
-      that Historical nodes actually enforce, just a value published to the Coordinator node so it can plan
-      accordingly.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.segmentCache.infoDir</name>
-    <value>/apps/druid/segmentCache/info_dir</value>
-    <description>Historical nodes keep track of the segments they are serving so that when the process is restarted they
-      can reload the same segments without waiting for the Coordinator to reassign. This path defines where this
-      metadata is kept. Directory will be created if needed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
deleted file mode 100644
index bcb731a..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
+++ /dev/null
@@ -1,84 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>druid_log_level</name>
-    <value>info</value>
-    <description>Log level for io.druid logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>root_log_level</name>
-    <value>WARN</value>
-    <description>Log level for root logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>metamx_log_level</name>
-    <value>info</value>
-    <description>Log level for com.metamx logging</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>druid-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value><![CDATA[<?xml version="1.0" encoding="UTF-8" ?>
-<!--
-  ~ Licensed to the Apache Software Foundation (ASF) under one
-  ~ or more contributor license agreements.  See the NOTICE file
-  ~ distributed with this work for additional information
-  ~ regarding copyright ownership.  The ASF licenses this file
-  ~ to you under the Apache License, Version 2.0 (the
-  ~ "License"); you may not use this file except in compliance
-  ~ with the License.  You may obtain a copy of the License at
-  ~
-  ~     http://www.apache.org/licenses/LICENSE-2.0
-  ~
-  ~ Unless required by applicable law or agreed to in writing, software
-  ~ distributed under the License is distributed on an "AS IS" BASIS,
-  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  ~ See the License for the specific language governing permissions and
-  ~ limitations under the License.
-  -->
-    <Configuration>
-        <Appenders>
-            <Console name="Console" target="SYSTEM_OUT">
-                <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
-            </Console>
-        </Appenders>
-        <Loggers>
-            <Logger name="com.metamx" level="{{metamx_log_level}}"/>
-            <Logger name="io.druid" level="{{druid_log_level}}"/>
-            <Root level="{{root_log_level}}">
-                <AppenderRef ref="Console"/>
-            </Root>
-        </Loggers>
-    </Configuration>
-      ]]></value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
deleted file mode 100644
index b7308ce..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>druid_log_maxfilesize</name>
-    <value>256</value>
-    <description>The maximum size a log file may reach before it is rotated</description>
-    <display-name>Druid Log: backup file size</display-name>
-    <value-attributes>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid_log_maxbackupindex</name>
-    <value>7</value>
-    <description>The number of backup files</description>
-    <display-name>Druid Log: # of backup files</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>druid logrotate template</display-name>
-    <description>Custom logrotate file</description>
-    <value><![CDATA[
-    {{druid_log_dir}}/*.log {
-        copytruncate
-        rotate {{druid_log_maxbackupindex}}
-        daily
-        nocompress
-        missingok
-        notifempty
-        create 660 druid users
-        dateext
-        dateformat -%Y-%m-%d-%s
-        size {{druid_log_maxfilesize}}M
-        }
-      ]]></value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
deleted file mode 100644
index 08280ad..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
+++ /dev/null
@@ -1,122 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/middlemanager</value>
-    <description>The druid.service name of the middlemanager node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8091</value>
-    <description>The port on which the middlemanager nodes will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.startPort</name>
-    <value>8100</value>
-    <description>The port that peons begin running on.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.worker.capacity</name>
-    <value>3</value>
-    <description>Maximum number of tasks the Middle Manager can accept.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.javaOpts</name>
-    <value>-server -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dhdp.version={{stack_version}} -Dhadoop.mapreduce.job.classloader=true</value>
-    <description>
-      A string of -X Java options to pass to the peon's JVM.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.task.baseTaskDir</name>
-    <value>/apps/druid/tasks</value>
-    <description>
-      Base temporary working directory for druid tasks.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>
-      Number of threads for HTTP requests.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.buffer.sizeBytes</name>
-    <value>256000000</value>
-    <value-attributes>
-      <type>long</type>
-      <minimum>0</minimum>
-      <unit>Bytes</unit>
-    </value-attributes>
-    <description>
-      This specifies a buffer size for the storage of intermediate results. The computation engine in both the
-      Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations
-      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
-      require more passes depending on the query that is being executed.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.processing.numThreads</name>
-    <value>2</value>
-    <description>
-      The number of processing threads to have available for parallel processing of segments.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.task.hadoopWorkingPath</name>
-    <value>/tmp/druid-indexing</value>
-    <description>
-      Temporary working directory for Hadoop tasks
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.fork.property.hadoop.mapreduce.reduce.java.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <description>
-      Default Java options for Reducer containers
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>druid.indexer.fork.property.hadoop.mapreduce.map.java.opts</name>
-    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
-    <description>
-      Default Java options for Map containers
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
deleted file mode 100644
index 57d1c63..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
+++ /dev/null
@@ -1,52 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/overlord</value>
-    <description>The druid.service name of the overlord node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8090</value>
-    <description>The port on which the overlord will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.runner.type</name>
-    <value>remote</value>
-    <description>Choices "local" or "remote". Indicates whether tasks should be run locally or in a distributed
-      environment.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.indexer.storage.type</name>
-    <value>metadata</value>
-    <description>Choices are "local" or "metadata". Indicates whether incoming tasks should be stored locally (in heap)
-      or in metadata storage. Storing incoming tasks in metadata storage allows for tasks to be resumed if the overlord
-      should fail.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
deleted file mode 100644
index d544315..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
+++ /dev/null
@@ -1,59 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>druid.service</name>
-    <value>druid/router</value>
-    <description>The druid.service name of the router node.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.port</name>
-    <value>8888</value>
-    <description>The port on which the router will accept connections.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.http.numConnections</name>
-    <value>20</value>
-    <description>
-      Size of connection pool for the router to connect to historical and real-time nodes. If there are more
-      queries than this number that all need to speak to the same node, then they will queue up.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.server.http.numThreads</name>
-    <value>50</value>
-    <description>Number of threads for HTTP requests.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>druid.router.tierToBrokerMap</name>
-    <value>{"_default_tier":"druid/broker"}</value>
-    <description>
-      Used to route queries for a certain tier of data to their appropriate broker. An ordered JSON map of
-      tiers to broker names. The priority of brokers is based on the ordering.
-    </description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
deleted file mode 100644
index f9f1a35..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
+++ /dev/null
@@ -1,223 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>DRUID</name>
-      <displayName>Druid</displayName>
-      <comment>A fast column-oriented distributed data store. This service is &lt;b&gt;Technical Preview&lt;/b&gt;.</comment>
-      <version>0.9.2</version>
-      <components>
-        <component>
-          <name>DRUID_COORDINATOR</name>
-          <displayName>Druid Coordinator</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/coordinator.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-coordinator</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_OVERLORD</name>
-          <displayName>Druid Overlord</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/overlord.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-overlord</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_HISTORICAL</name>
-          <displayName>Druid Historical</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/historical.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-historical</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_BROKER</name>
-          <displayName>Druid Broker</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/broker.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-broker</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_MIDDLEMANAGER</name>
-          <displayName>Druid MiddleManager</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/middlemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-middlemanager</config-type>
-          </configuration-dependencies>
-        </component>
-        <component>
-          <name>DRUID_ROUTER</name>
-          <displayName>Druid Router</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/router.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <configuration-dependencies>
-            <config-type>druid-router</config-type>
-          </configuration-dependencies>
-        </component>
-      </components>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>druid_${stack_version}</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>druid-${stack_version}</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-      <configuration-dependencies>
-        <config-type>druid-common</config-type>
-        <config-type>druid-env</config-type>
-        <config-type>druid-log4j</config-type>
-        <config-type>druid-logrotate</config-type>
-        <config-type>zoo.cfg</config-type>
-      </configuration-dependencies>
-
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
deleted file mode 100644
index bd170cb..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidBroker(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="broker")
-
-
-if __name__ == "__main__":
-  DruidBroker().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/e61556cc/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
deleted file mode 100644
index a86fa40..0000000
--- a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
+++ /dev/null
@@ -1,28 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from druid_node import DruidBase
-
-
-class DruidCoordinator(DruidBase):
-  def __init__(self):
-    DruidBase.__init__(self, nodeType="coordinator")
-
-
-if __name__ == "__main__":
-  DruidCoordinator().execute()


[12/50] [abbrv] ambari git commit: AMBARI-22104. Refactor existing server side actions to use the common AbstractUpgradeServerAction (dlysnichenko)

Posted by jl...@apache.org.
AMBARI-22104. Refactor existing server side actions to use the common AbstractUpgradeServerAction (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1032bc5d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1032bc5d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1032bc5d

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 1032bc5d38b518429d953049fc043da3d6ddfc9d
Parents: 1f00c19
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Tue Oct 3 17:37:02 2017 +0300
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Tue Oct 3 17:37:02 2017 +0300

----------------------------------------------------------------------
 .../upgrades/AbstractUpgradeServerAction.java      |  6 +++++-
 .../upgrades/AutoSkipFailedSummaryAction.java      | 15 ++-------------
 .../upgrades/ComponentVersionCheckAction.java      |  2 +-
 .../serveraction/upgrades/ConfigureAction.java     | 11 ++---------
 .../upgrades/FinalizeUpgradeAction.java            |  2 +-
 .../FixCapacitySchedulerOrderingPolicy.java        | 12 ++----------
 .../serveraction/upgrades/FixLzoCodecPath.java     | 10 ++--------
 .../serveraction/upgrades/FixOozieAdminUsers.java  | 10 ++--------
 .../upgrades/FixYarnWebServiceUrl.java             | 11 ++---------
 .../upgrades/HBaseConfigCalculation.java           | 10 ++--------
 .../HBaseEnvMaxDirectMemorySizeAction.java         | 11 ++---------
 .../upgrades/HiveEnvClasspathAction.java           | 11 ++---------
 .../upgrades/HiveZKQuorumConfigAction.java         | 13 ++-----------
 .../upgrades/KerberosKeytabsAction.java            | 13 ++-----------
 .../serveraction/upgrades/ManualStageAction.java   |  3 +--
 .../upgrades/OozieConfigCalculation.java           | 11 ++---------
 .../upgrades/PreconfigureKerberosAction.java       |  2 +-
 .../upgrades/RangerConfigCalculation.java          | 11 ++---------
 .../upgrades/RangerKerberosConfigCalculation.java  | 11 ++---------
 .../upgrades/RangerKmsProxyConfig.java             | 11 ++---------
 .../upgrades/RangerUsersyncConfigCalculation.java  | 11 ++---------
 .../upgrades/RangerWebAlertConfigAction.java       |  9 ++-------
 .../upgrades/SparkShufflePropertyConfig.java       | 11 ++---------
 .../upgrades/UpdateDesiredRepositoryAction.java    | 10 +---------
 .../upgrades/UpgradeUserKerberosDescriptor.java    |  6 +-----
 .../upgrades/YarnConfigCalculation.java            | 11 ++---------
 .../FixCapacitySchedulerOrderingPolicyTest.java    |  2 +-
 .../upgrades/FixOozieAdminUsersTest.java           |  2 +-
 .../upgrades/FixYarnWebServiceUrlTest.java         |  2 +-
 .../HBaseEnvMaxDirectMemorySizeActionTest.java     |  2 +-
 .../upgrades/HiveEnvClasspathActionTest.java       |  2 +-
 .../upgrades/HiveZKQuorumConfigActionTest.java     |  2 +-
 .../upgrades/KerberosKeytabsActionTest.java        | 17 +++++++++++++++++
 .../upgrades/RangerConfigCalculationTest.java      |  2 +-
 .../RangerKerberosConfigCalculationTest.java       |  2 +-
 .../upgrades/RangerKmsProxyConfigTest.java         |  2 +-
 .../RangerUsersyncConfigCalculationTest.java       |  2 +-
 .../upgrades/RangerWebAlertConfigActionTest.java   |  9 ++++++++-
 .../upgrades/SparkShufflePropertyConfigTest.java   |  2 +-
 .../UpgradeUserKerberosDescriptorTest.java         |  4 ++--
 40 files changed, 87 insertions(+), 209 deletions(-)
----------------------------------------------------------------------
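
The shape of the refactor is the same in every file below: each action previously injected its own Clusters instance, and the field now lives privately on the common base class behind a getClusters() accessor. A minimal, self-contained sketch of that encapsulation pattern, using stand-in types (ClusterRegistry, UpgradeActionBase, and MyAction are illustrative, not Ambari classes; in Ambari the private field is populated by Guice field injection rather than a setter):

// Toy model of the pattern applied in this commit: the shared dependency is
// held privately in the base class and exposed through a getter, so each
// subclass stops declaring its own injected copy.
import java.util.HashMap;
import java.util.Map;

class ClusterRegistry {                      // stand-in for Clusters
  private final Map<String, String> clusters = new HashMap<>();
  void add(String name, String state) { clusters.put(name, state); }
  String get(String name) { return clusters.get(name); }
}

abstract class UpgradeActionBase {           // stand-in for AbstractUpgradeServerAction
  private ClusterRegistry m_clusters;        // private: subclasses cannot touch it directly
  void setClusters(ClusterRegistry c) { m_clusters = c; }  // the injector's job in Ambari
  public ClusterRegistry getClusters() { return m_clusters; }
  abstract String execute(String clusterName);
}

class MyAction extends UpgradeActionBase {   // stand-in for a concrete server action
  @Override
  String execute(String clusterName) {
    return getClusters().get(clusterName);   // was: clusters.getCluster(clusterName)
  }
}

public class EncapsulationSketch {
  public static void main(String[] args) {
    ClusterRegistry registry = new ClusterRegistry();
    registry.add("c1", "UPGRADING");
    MyAction action = new MyAction();
    action.setClusters(registry);
    System.out.println(action.execute("c1")); // prints UPGRADING
  }
}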


http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
index 8ebb186..80c73f8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AbstractUpgradeServerAction.java
@@ -33,8 +33,12 @@ import com.google.inject.Inject;
  */
 public abstract class AbstractUpgradeServerAction extends AbstractServerAction {
 
+  public Clusters getClusters() {
+    return m_clusters;
+  }
+
   @Inject
-  protected Clusters m_clusters;
+  private Clusters m_clusters;
 
   /**
    * Used to move desired repo versions forward.

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AutoSkipFailedSummaryAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AutoSkipFailedSummaryAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AutoSkipFailedSummaryAction.java
index 664cb2c..1abd947 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AutoSkipFailedSummaryAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/AutoSkipFailedSummaryAction.java
@@ -36,13 +36,10 @@ import org.apache.ambari.server.actionmanager.ServiceComponentHostEventWrapper;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
-import org.apache.ambari.server.orm.dao.UpgradeDAO;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.UpgradeGroupEntity;
 import org.apache.ambari.server.orm.entities.UpgradeItemEntity;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ServiceComponentHostEvent;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -57,7 +54,7 @@ import com.google.inject.Inject;
  * {@link HostRoleStatus#COMPLETED} if there are no skipped failures. Otherwise
  * it will be placed into {@link HostRoleStatus#HOLDING}.
  */
-public class AutoSkipFailedSummaryAction extends AbstractServerAction {
+public class AutoSkipFailedSummaryAction extends AbstractUpgradeServerAction {
 
   /**
    * Logger.
@@ -75,12 +72,6 @@ public class AutoSkipFailedSummaryAction extends AbstractServerAction {
   private static final String FAILURES = "failures";
 
   /**
-   * Used to lookup the {@link UpgradeGroupEntity}.
-   */
-  @Inject
-  private UpgradeDAO m_upgradeDAO;
-
-  /**
    * Used to lookup the tasks that need to be checked for
    * {@link HostRoleStatus#SKIPPED_FAILED}.
    */
@@ -99,8 +90,6 @@ public class AutoSkipFailedSummaryAction extends AbstractServerAction {
   @Inject
   private ActionMetadata actionMetadata;
 
-  @Inject
-  private Clusters clusters;
 
   /**
    * A mapping of host -> Map<key,info> for each failure.
@@ -119,7 +108,7 @@ public class AutoSkipFailedSummaryAction extends AbstractServerAction {
     long stageId = hostRoleCommand.getStageId();
 
     String clusterName = hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     // use the host role command to get to the parent upgrade group
     UpgradeItemEntity upgradeItem = m_upgradeDAO.findUpgradeItemByRequestAndStage(requestId,stageId);

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
index 44d2b4d..f72637e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ComponentVersionCheckAction.java
@@ -48,7 +48,7 @@ public class ComponentVersionCheckAction extends FinalizeUpgradeAction {
 
     String clusterName = getExecutionCommand().getClusterName();
 
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     UpgradeContext upgradeContext = getUpgradeContext(cluster);
     Set<InfoTuple> errors = validateComponentVersions(upgradeContext);

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
index ec4d383..a7f910f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ConfigureAction.java
@@ -37,7 +37,6 @@ import org.apache.ambari.server.controller.ConfigurationRequest;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ConfigMergeHelper;
@@ -89,12 +88,6 @@ public class ConfigureAction extends AbstractUpgradeServerAction {
   private static final Logger LOG = LoggerFactory.getLogger(ConfigureAction.class);
 
   /**
-   * Used to lookup the cluster.
-   */
-  @Inject
-  private Clusters m_clusters;
-
-  /**
    * Used to update the configuration properties.
    */
   @Inject
@@ -183,7 +176,7 @@ public class ConfigureAction extends AbstractUpgradeServerAction {
     }
 
     String clusterName = commandParameters.get("clusterName");
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     UpgradeContext upgradeContext = getUpgradeContext(cluster);
 
     // such as hdfs-site or hbase-env
@@ -591,7 +584,7 @@ public class ConfigureAction extends AbstractUpgradeServerAction {
 
 
     String configType = config.getType();
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     StackId oldStack = cluster.getCurrentStackVersion();
 
     // iterate over all properties for every cluster service; if the property

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
index 451f802..d6876d9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FinalizeUpgradeAction.java
@@ -84,7 +84,7 @@ public class FinalizeUpgradeAction extends AbstractUpgradeServerAction {
       throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     UpgradeContext upgradeContext = getUpgradeContext(cluster);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java
index f14d702..03dbd27 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicy.java
@@ -27,20 +27,16 @@ import java.util.regex.Pattern;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * In HDP-2.6, parent queues cannot have an ordering-policy other than {@code utilization} or
  * {@code priority-utilization}.
  *
  * This class is used when moving from HDP-2.3/HDP-2.4/HDP-2.5 to HDP-2.6.
  */
-public class FixCapacitySchedulerOrderingPolicy extends AbstractServerAction {
+public class FixCapacitySchedulerOrderingPolicy extends AbstractUpgradeServerAction {
   private static final String SOURCE_CONFIG_TYPE = "capacity-scheduler";
   private static final String ORDERING_POLICY_SUFFIX = "ordering-policy";
 
@@ -53,17 +49,13 @@ public class FixCapacitySchedulerOrderingPolicy extends AbstractServerAction {
   private static final Pattern ROOT_QUEUE_REGEX = Pattern.compile(
       String.format("%s.([.\\-_\\w]+).queues", CAPACITY_SCHEDULER_PREFIX));
 
-
-  @Inject
-  private Clusters clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
 
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     Config config = cluster.getDesiredConfigByType(SOURCE_CONFIG_TYPE);
 
     if (null == config) {
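
A standalone sketch of how a ROOT_QUEUE_REGEX of the shape shown above can pick out queues that declare children, which is how the action finds parent queues whose ordering-policy must be fixed. The prefix value "yarn.scheduler.capacity" is an assumption; only the regex shape appears in this hunk:

// Keys like yarn.scheduler.capacity.<queue-path>.queues identify parent
// queues; group(1) captures the queue path whose ordering-policy is checked.
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ParentQueueSketch {
  private static final String CAPACITY_SCHEDULER_PREFIX = "yarn.scheduler.capacity"; // assumed value
  private static final Pattern ROOT_QUEUE_REGEX = Pattern.compile(
      String.format("%s.([.\\-_\\w]+).queues", CAPACITY_SCHEDULER_PREFIX));

  public static void main(String[] args) {
    Map<String, String> props = new LinkedHashMap<>();
    props.put("yarn.scheduler.capacity.root.queues", "default,llap");
    props.put("yarn.scheduler.capacity.root.default.capacity", "80");

    for (String key : props.keySet()) {
      Matcher m = ROOT_QUEUE_REGEX.matcher(key);
      if (m.matches()) {
        // "root" is a parent queue; its policy key would be
        // yarn.scheduler.capacity.root.ordering-policy
        System.out.println("parent queue: " + m.group(1));
      }
    }
  }
}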

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
index 4833729..4a92327 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixLzoCodecPath.java
@@ -26,20 +26,17 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import com.google.inject.Inject;
 
 /**
  * During stack upgrade, updates the lzo codec path in mapreduce.application.classpath and
  * in tez.cluster.additional.classpath.prefix to look like
  * /usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar
  */
-public class FixLzoCodecPath extends AbstractServerAction {
+public class FixLzoCodecPath extends AbstractUpgradeServerAction {
 
   /**
    * Lists config types and properties that may contain lzo codec path
@@ -49,14 +46,11 @@ public class FixLzoCodecPath extends AbstractServerAction {
     put("tez-site", new String [] {"tez.cluster.additional.classpath.prefix"});
   }};
 
-  @Inject
-  private Clusters clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
     throws AmbariException, InterruptedException {
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     ArrayList<String> modifiedProperties = new ArrayList<>();
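
A plausible standalone sketch of the normalization this class performs on the properties listed in TARGET_PROPERTIES (the substitution code itself is outside the hunks shown): any concrete stack version baked into the hadoop-lzo jar path is replaced with the ${hdp.version} placeholder so the classpath survives future upgrades.

// The sample classpath value and regex are illustrative, not taken from
// Ambari's implementation.
public class LzoPathSketch {
  public static void main(String[] args) {
    String classpath = "/usr/hdp/2.3.4.0-3485/hadoop/lib/hadoop-lzo-0.6.0.2.3.4.0-3485.jar"
        + ":/usr/hdp/current/hadoop-client/*";
    String fixed = classpath.replaceAll(
        "/usr/hdp/[^/]+/hadoop/lib/hadoop-lzo-0\\.6\\.0\\.[^:]+\\.jar",
        "/usr/hdp/\\${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.\\${hdp.version}.jar");
    System.out.println(fixed);
    // /usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/usr/hdp/current/hadoop-client/*
  }
}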
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
index 75588d5..305825a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsers.java
@@ -24,34 +24,28 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import com.google.inject.Inject;
 
 /**
  * During stack upgrade, ensures that the Falcon service user is included in the
  * list of Oozie admin users (the oozie_admin_users property of oozie-env).
  */
-public class FixOozieAdminUsers extends AbstractServerAction {
+public class FixOozieAdminUsers extends AbstractUpgradeServerAction {
   private static final String TARGET_OOZIE_CONFIG_TYPE = "oozie-env";
   private static final String OOZIE_ADMIN_USERS_PROP = "oozie_admin_users";
   private static final String FALCON_CONFIG_TYPE = "falcon-env";
   private static final String FALCON_USER_PROP = "falcon_user";
 
 
-  @Inject
-  private Clusters clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
     throws AmbariException, InterruptedException {
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     Config oozieConfig = cluster.getDesiredConfigByType(TARGET_OOZIE_CONFIG_TYPE);
     Config falconConfig = cluster.getDesiredConfigByType(FALCON_CONFIG_TYPE);
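
A standalone sketch of the fix the constants above imply: make sure the Falcon service user appears in oozie-env's oozie_admin_users list. The comma-separated format is an assumption based on the property name:

public class OozieAdminUsersSketch {
  // Returns the admin-users list with falconUser appended if it is missing.
  static String ensureAdminUser(String oozieAdminUsers, String falconUser) {
    for (String user : oozieAdminUsers.split("\\s*,\\s*")) {
      if (user.equals(falconUser)) {
        return oozieAdminUsers;          // already present, leave untouched
      }
    }
    return oozieAdminUsers + "," + falconUser;
  }

  public static void main(String[] args) {
    System.out.println(ensureAdminUser("oozie, oozie-admin", "falcon"));
    // prints: oozie, oozie-admin,falcon
  }
}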
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrl.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrl.java
index 5823c8b..6a3c5e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrl.java
@@ -23,20 +23,16 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * yarn.log.server.web-service.url is added in HDP-2.6.
  * It takes its value from yarn.timeline-service.webapp.address if yarn.http.policy is HTTP_ONLY
  * and from yarn.timeline-service.webapp.https.address if yarn.http.policy is HTTPS_ONLY.
  * This class is used when moving from HDP-2.3/HDP-2.4/HDP-2.5 to HDP-2.6.
  */
-public class FixYarnWebServiceUrl extends AbstractServerAction {
+public class FixYarnWebServiceUrl extends AbstractUpgradeServerAction {
     private static final String SOURCE_CONFIG_TYPE = "yarn-site";
     private static final String YARN_TIMELINE_WEBAPP_HTTPADDRESS = "yarn.timeline-service.webapp.address";
     private static final String YARN_TIMELINE_WEBAPP_HTTPSADDRESS = "yarn.timeline-service.webapp.https.address";
@@ -45,15 +41,12 @@ public class FixYarnWebServiceUrl extends AbstractServerAction {
     private static final String HTTP = "HTTP_ONLY";
     private static final String HTTPS = "HTTPS_ONLY";
 
-    @Inject
-    private Clusters clusters;
-
     @Override
     public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
             throws AmbariException, InterruptedException{
 
         String clusterName = getExecutionCommand().getClusterName();
-        Cluster cluster = clusters.getCluster(clusterName);
+        Cluster cluster = getClusters().getCluster(clusterName);
         Config config = cluster.getDesiredConfigByType(SOURCE_CONFIG_TYPE);
 
         if (config == null) {
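
A standalone sketch of the rule the class javadoc describes: the new yarn.log.server.web-service.url takes the HTTP or HTTPS timeline webapp address depending on yarn.http.policy. The "/ws/v1/applicationhistory" path suffix and the sample addresses are assumptions, not shown in this hunk:

import java.util.HashMap;
import java.util.Map;

public class YarnWebServiceUrlSketch {
  public static void main(String[] args) {
    Map<String, String> yarnSite = new HashMap<>();
    yarnSite.put("yarn.http.policy", "HTTPS_ONLY");
    yarnSite.put("yarn.timeline-service.webapp.address", "host:8188");
    yarnSite.put("yarn.timeline-service.webapp.https.address", "host:8190");

    // Pick the source address according to the policy, then derive the URL.
    String source = "HTTPS_ONLY".equals(yarnSite.get("yarn.http.policy"))
        ? yarnSite.get("yarn.timeline-service.webapp.https.address")
        : yarnSite.get("yarn.timeline-service.webapp.address");
    yarnSite.put("yarn.log.server.web-service.url", source + "/ws/v1/applicationhistory");
    System.out.println(yarnSite.get("yarn.log.server.web-service.url"));
  }
}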

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
index 739dd7e..aaaec92 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseConfigCalculation.java
@@ -25,25 +25,19 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * Computes HBase properties.  This class is only used when moving from
  * HDP-2.2 to HDP-2.3 in that upgrade pack.
  */
-public class HBaseConfigCalculation extends AbstractServerAction {
+public class HBaseConfigCalculation extends AbstractUpgradeServerAction {
   private static final String SOURCE_CONFIG_TYPE = "hbase-site";
   private static final String OLD_UPPER_LIMIT_PROPERTY_NAME = "hbase.regionserver.global.memstore.upperLimit";
   private static final String OLD_LOWER_LIMIT_PROPERTY_NAME = "hbase.regionserver.global.memstore.lowerLimit";
   private static final String NEW_LOWER_LIMIT_PROPERTY_NAME = "hbase.regionserver.global.memstore.size.lower.limit";
 
-  @Inject
-  private Clusters clusters;
 
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
@@ -51,7 +45,7 @@ public class HBaseConfigCalculation extends AbstractServerAction {
 
     String clusterName = getExecutionCommand().getClusterName();
 
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     Config config = cluster.getDesiredConfigByType(SOURCE_CONFIG_TYPE);
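
A standalone sketch, assuming the conversion the property names suggest: the old lower limit was a fraction of the heap, while the new one is expressed relative to the memstore size, so new = old_lower / old_upper. The exact arithmetic lives outside the hunk shown here:

import java.math.BigDecimal;
import java.math.RoundingMode;

public class MemstoreLowerLimitSketch {
  public static void main(String[] args) {
    BigDecimal upperLimit = new BigDecimal("0.4");  // hbase.regionserver.global.memstore.upperLimit
    BigDecimal lowerLimit = new BigDecimal("0.38"); // hbase.regionserver.global.memstore.lowerLimit
    BigDecimal newLowerLimit = lowerLimit.divide(upperLimit, 2, RoundingMode.HALF_UP);
    System.out.println(newLowerLimit); // 0.95 -> hbase.regionserver.global.memstore.size.lower.limit
  }
}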
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
index fb15555..26e3c73 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeAction.java
@@ -26,33 +26,26 @@ import java.util.regex.Pattern;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * Computes HBase Env content property.
  * This class is only used when moving from HDP-2.3 to HDP-2.4 and HDP-2.3 to HDP-2.5
  */
-public class HBaseEnvMaxDirectMemorySizeAction extends AbstractServerAction {
+public class HBaseEnvMaxDirectMemorySizeAction extends AbstractUpgradeServerAction {
   private static final String SOURCE_CONFIG_TYPE = "hbase-env";
   private static final String CONTENT_NAME = "content";
   private static final String APPEND_CONTENT_LINE = "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS {% if hbase_max_direct_memory_size %} -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"";
   private static final String CHECK_REGEX = "^.*\\s*(HBASE_MASTER_OPTS)\\s*=.*(XX:MaxDirectMemorySize).*$";
   private static final Pattern REGEX = Pattern.compile(CHECK_REGEX, Pattern.MULTILINE);
 
-  @Inject
-  private Clusters clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
     throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     Config config = cluster.getDesiredConfigByType(SOURCE_CONFIG_TYPE);
 
     if (config == null) {
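
A standalone sketch of the guard-and-append logic the constants above imply: the MaxDirectMemorySize export is only added to hbase-env content if no existing HBASE_MASTER_OPTS line already sets it (the constants are copied from the hunk; the sample content is illustrative):

import java.util.regex.Pattern;

public class HbaseEnvAppendSketch {
  private static final String APPEND_CONTENT_LINE =
      "export HBASE_MASTER_OPTS=\"$HBASE_MASTER_OPTS {% if hbase_max_direct_memory_size %}"
      + " -XX:MaxDirectMemorySize={{hbase_max_direct_memory_size}}m {% endif %}\"";
  private static final Pattern REGEX = Pattern.compile(
      "^.*\\s*(HBASE_MASTER_OPTS)\\s*=.*(XX:MaxDirectMemorySize).*$", Pattern.MULTILINE);

  public static void main(String[] args) {
    String content = "export HBASE_HEAPSIZE=1024\nexport HBASE_OPTS=\"$HBASE_OPTS\"";
    if (!REGEX.matcher(content).find()) {         // no line sets it yet
      content = content + "\n" + APPEND_CONTENT_LINE;
    }
    System.out.println(content);
  }
}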

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
index c5000bf..12ef9b5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathAction.java
@@ -26,17 +26,13 @@ import java.util.regex.Pattern;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * Append hive-env config type with HIVE_HOME and HIVE_CONF_DIR variables if they are absent
  */
-public class HiveEnvClasspathAction extends AbstractServerAction {
+public class HiveEnvClasspathAction extends AbstractUpgradeServerAction {
   private static final String TARGET_CONFIG_TYPE = "hive-env";
   private static final String CONTENT_PROPERTY_NAME = "content";
 
@@ -48,16 +44,13 @@ public class HiveEnvClasspathAction extends AbstractServerAction {
 
   private static final String VERIFY_REGEXP = "^\\s*export\\s(?<property>%s|%s)\\s*=\\s*.*$";
 
-  @Inject
-  private Clusters clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
     throws AmbariException, InterruptedException {
 
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     Config config = cluster.getDesiredConfigByType(TARGET_CONFIG_TYPE);
 
     if (config == null) {
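
A standalone sketch of the append-if-absent check implied by VERIFY_REGEXP above: hive-env content is scanned for existing "export HIVE_HOME=..." / "export HIVE_CONF_DIR=..." lines, and missing ones are appended. The default values appended below are assumptions:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HiveEnvClasspathSketch {
  public static void main(String[] args) {
    String content = "export HIVE_CONF_DIR=/etc/hive/conf";
    Pattern verify = Pattern.compile(
        String.format("^\\s*export\\s(?<property>%s|%s)\\s*=\\s*.*$",
            "HIVE_HOME", "HIVE_CONF_DIR"), Pattern.MULTILINE);

    // Record which exports are already present.
    boolean hasHome = false, hasConfDir = false;
    Matcher m = verify.matcher(content);
    while (m.find()) {
      if ("HIVE_HOME".equals(m.group("property")))     hasHome = true;
      if ("HIVE_CONF_DIR".equals(m.group("property"))) hasConfDir = true;
    }
    if (!hasHome)    content += "\nexport HIVE_HOME=${HIVE_HOME:-/usr/hdp/current/hive-client}";
    if (!hasConfDir) content += "\nexport HIVE_CONF_DIR=${HIVE_CONF_DIR:-/etc/hive/conf}";
    System.out.println(content);
  }
}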

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
index 23eacec..c89152b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigAction.java
@@ -23,13 +23,9 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * The {@link HiveZKQuorumConfigAction} is used to ensure that the following
  * settings are correctly set when upgrading a Hive Server:
@@ -48,16 +44,11 @@ import com.google.inject.Inject;
  * Hive that was upgraded previously. They are actually set (incorrectly) on a
  * non-Kerberized Hive installation by the installation wizard.
  */
-public class HiveZKQuorumConfigAction extends AbstractServerAction {
+public class HiveZKQuorumConfigAction extends AbstractUpgradeServerAction {
   protected static final String HIVE_SITE_CONFIG_TYPE = "hive-site";
   protected static final String HIVE_SITE_ZK_QUORUM = "hive.zookeeper.quorum";
   protected static final String HIVE_SITE_ZK_CONNECT_STRING = "hive.cluster.delegation.token.store.zookeeper.connectString";
 
-  /**
-   * Used for retrieving the cluster (and eventually the desired configuration).
-   */
-  @Inject
-  private Clusters m_clusters;
 
   /**
    * {@inheritDoc}
@@ -67,7 +58,7 @@ public class HiveZKQuorumConfigAction extends AbstractServerAction {
       throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     Config hiveSite = cluster.getDesiredConfigByType(HIVE_SITE_CONFIG_TYPE);
     if (hiveSite == null) {
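
A standalone sketch of the correction the javadoc above describes: both ZooKeeper properties in hive-site are pointed at the cluster's ZooKeeper quorum. In Ambari the quorum string is derived from the ZooKeeper server hosts; here it is a hard-coded stand-in:

import java.util.HashMap;
import java.util.Map;

public class HiveZkQuorumSketch {
  public static void main(String[] args) {
    String zkQuorum = "zk1:2181,zk2:2181,zk3:2181"; // stand-in value
    Map<String, String> hiveSite = new HashMap<>();
    hiveSite.put("hive.zookeeper.quorum", zkQuorum);
    hiveSite.put("hive.cluster.delegation.token.store.zookeeper.connectString", zkQuorum);
    hiveSite.forEach((k, v) -> System.out.println(k + "=" + v));
  }
}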

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsAction.java
index 4c6371b..bf48873 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsAction.java
@@ -24,10 +24,8 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.controller.KerberosHelper;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.serveraction.kerberos.KDCType;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
 import com.google.inject.Inject;
@@ -42,18 +40,11 @@ import com.google.inject.Inject;
  *  <li>If the KDC type is set (KDCType is not {@link KDCType#NONE}, implying manual)</li>
  * </ul>
  */
-public class KerberosKeytabsAction extends AbstractServerAction {
+public class KerberosKeytabsAction extends AbstractUpgradeServerAction {
 
   private static final String KERBEROS_ENV = "kerberos-env";
   private static final String KDC_TYPE_KEY = "kdc_type";
 
-
-  /**
-   * Used for retrieving the cluster (and eventually the desired configuration).
-   */
-  @Inject
-  private Clusters m_clusters;
-
   @Inject
   private KerberosHelper m_kerberosHelper;
 
@@ -63,7 +54,7 @@ public class KerberosKeytabsAction extends AbstractServerAction {
 
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     StringBuilder stdout = new StringBuilder();
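
A standalone sketch of the gating check suggested by KERBEROS_ENV and KDC_TYPE_KEY: the action only flags keytab regeneration when a real (non-manual) KDC is configured. The enum below is a stand-in for Ambari's KDCType, and the value format is an assumption:

import java.util.HashMap;
import java.util.Map;

public class KdcGateSketch {
  enum KdcType { NONE, MIT_KDC, ACTIVE_DIRECTORY }

  public static void main(String[] args) {
    Map<String, String> kerberosEnv = new HashMap<>();
    kerberosEnv.put("kdc_type", "mit-kdc");

    String raw = kerberosEnv.get("kdc_type");
    KdcType type = raw == null ? KdcType.NONE
        : KdcType.valueOf(raw.toUpperCase().replace('-', '_'));
    if (type != KdcType.NONE) {
      System.out.println("KDC is managed (" + type + "); keytab regeneration applies");
    } else {
      System.out.println("Manual keytabs; nothing to do");
    }
  }
}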
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ManualStageAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ManualStageAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ManualStageAction.java
index 39b23bc..7faa6f6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ManualStageAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ManualStageAction.java
@@ -22,12 +22,11 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 
 /**
  * Action that represents a manual stage.
  */
-public class ManualStageAction extends AbstractServerAction {
+public class ManualStageAction extends AbstractUpgradeServerAction {
 
   @Override
   public CommandReport execute(

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
index 9b8a7dc..5b64ba6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/OozieConfigCalculation.java
@@ -26,28 +26,21 @@ import java.util.regex.Pattern;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * Changes oozie-env during upgrade (adds -Dhdp.version to $HADOOP_OPTS variable)
  */
-public class OozieConfigCalculation extends AbstractServerAction {
+public class OozieConfigCalculation extends AbstractUpgradeServerAction {
   private static final String TARGET_CONFIG_TYPE = "oozie-env";
   private static final String CONTENT_PROPERTY_NAME = "content";
 
-  @Inject
-  private Clusters clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
     throws AmbariException, InterruptedException {
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     Config config = cluster.getDesiredConfigByType(TARGET_CONFIG_TYPE);
 
     if (config == null) {
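
A standalone sketch of the change the class javadoc describes: ensure oozie-env's content exports HADOOP_OPTS with -Dhdp.version. The exact line Ambari injects is not shown in this hunk, so the text below is illustrative:

public class OozieEnvSketch {
  public static void main(String[] args) {
    String content = "export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}";
    if (!content.contains("-Dhdp.version")) {    // only add when missing
      content += "\nexport HADOOP_OPTS=\"-Dhdp.version=$HDP_VERSION $HADOOP_OPTS\"";
    }
    System.out.println(content);
  }
}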

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java
index 30bc47f..d18f333 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/PreconfigureKerberosAction.java
@@ -92,7 +92,7 @@ public class PreconfigureKerberosAction extends AbstractUpgradeServerAction {
 
     if (!isDowngrade()) {
       String clusterName = commandParameters.get("clusterName");
-      Cluster cluster = m_clusters.getCluster(clusterName);
+      Cluster cluster = getClusters().getCluster(clusterName);
 
       if (cluster.getSecurityType() == SecurityType.KERBEROS) {
         StackId stackId;

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
index e53c95f..0e4b650 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculation.java
@@ -24,32 +24,25 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * Computes Ranger properties.  This class is only used when moving from
  * HDP-2.2 to HDP-2.3 in that upgrade pack.
  */
-public class RangerConfigCalculation extends AbstractServerAction {
+public class RangerConfigCalculation extends AbstractUpgradeServerAction {
   private static final String SOURCE_CONFIG_TYPE = "admin-properties";
   private static final String RANGER_ENV_CONFIG_TYPE = "ranger-env";
   private static final String RANGER_ADMIN_SITE_CONFIG_TYPE = "ranger-admin-site";
 
-  @Inject
-  private Clusters m_clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
 
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     Config sourceConfig = cluster.getDesiredConfigByType(SOURCE_CONFIG_TYPE);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
index 348b69e..18f97ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculation.java
@@ -24,20 +24,16 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.SecurityType;
 import org.apache.commons.lang.StringUtils;
 
-import com.google.inject.Inject;
-
 /**
 * Computes Ranger properties when upgrading to HDP-2.5
 */
 
-public class RangerKerberosConfigCalculation extends AbstractServerAction {
+public class RangerKerberosConfigCalculation extends AbstractUpgradeServerAction {
   private static final String RANGER_ADMIN_SITE_CONFIG_TYPE = "ranger-admin-site";
   private static final String HADOOP_ENV_CONFIG_TYPE = "hadoop-env";
   private static final String HIVE_ENV_CONFIG_TYPE = "hive-env";
@@ -58,15 +54,12 @@ public class RangerKerberosConfigCalculation extends AbstractServerAction {
   private static final String RANGER_PLUGINS_KAFKA_SERVICE_USER = "ranger.plugins.kafka.serviceuser";
   private static final String RANGER_PLUGINS_KMS_SERVICE_USER = "ranger.plugins.kms.serviceuser";
 
-  @Inject
-  private Clusters m_clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     String errMsg = "";
     String sucessMsg = "";
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
index 71c3a07..b4299e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfig.java
@@ -24,31 +24,24 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.SecurityType;
 
-import com.google.inject.Inject;
-
 /**
 * Computes Ranger KMS Proxy properties in kms-site
 */
 
-public class RangerKmsProxyConfig extends AbstractServerAction {
+public class RangerKmsProxyConfig extends AbstractUpgradeServerAction {
   private static final String RANGER_ENV_CONFIG_TYPE = "ranger-env";
   private static final String RANGER_KMS_SITE_CONFIG_TYPE = "kms-site";
 
-  @Inject
-  private Clusters m_clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
     throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     String outputMsg = "";
 
     Config rangerEnv = cluster.getDesiredConfigByType(RANGER_ENV_CONFIG_TYPE);

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculation.java
index 3573748..54d74be 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculation.java
@@ -25,30 +25,23 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * Computes Ranger Usersync ldap grouphierarchylevels property. This class is only used when upgrading from
  * HDP-2.6.x to HDP-2.6.y.
  */
 
-public class RangerUsersyncConfigCalculation extends AbstractServerAction {
+public class RangerUsersyncConfigCalculation extends AbstractUpgradeServerAction {
   private static final String RANGER_USERSYNC_CONFIG_TYPE = "ranger-ugsync-site";
   private static final String RANGER_ENV_CONFIG_TYPE = "ranger-env";
 
-  @Inject
-  private Clusters m_clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext) throws AmbariException, InterruptedException {
 
   String clusterName = getExecutionCommand().getClusterName();
-  Cluster cluster = m_clusters.getCluster(clusterName);
+  Cluster cluster = getClusters().getCluster(clusterName);
   String outputMsg = "";
 
   Config rangerUsersyncConfig = cluster.getDesiredConfigByType(RANGER_USERSYNC_CONFIG_TYPE);

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigAction.java
index a6b94f4..0d10b22 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigAction.java
@@ -29,9 +29,7 @@ import org.apache.ambari.server.events.AlertHashInvalidationEvent;
 import org.apache.ambari.server.events.publishers.AmbariEventPublisher;
 import org.apache.ambari.server.orm.dao.AlertDefinitionDAO;
 import org.apache.ambari.server.orm.entities.AlertDefinitionEntity;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.alert.AlertDefinitionHash;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -43,13 +41,10 @@ import com.google.inject.Inject;
 /**
  * This class is used to update Ranger service alert-check configs in Ambari
  */
-public class RangerWebAlertConfigAction extends AbstractServerAction {
+public class RangerWebAlertConfigAction extends AbstractUpgradeServerAction {
 
 
   @Inject
-  Clusters m_clusters;
-
-  @Inject
   AlertDefinitionDAO alertDefinitionDAO;
 
   @Inject
@@ -73,7 +68,7 @@ public class RangerWebAlertConfigAction extends AbstractServerAction {
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = m_clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     String ranger_admin_process = "ranger_admin_process";
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
index b1aa6e1..f743a30 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfig.java
@@ -27,14 +27,10 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.commons.lang.StringUtils;
 
-import com.google.inject.Inject;
-
 /**
  * Computes Yarn properties for SPARK.
  *
@@ -44,7 +40,7 @@ import com.google.inject.Inject;
  *
  * These properties available starting from HDP-2.4 stack.
  */
-public class SparkShufflePropertyConfig extends AbstractServerAction {
+public class SparkShufflePropertyConfig extends AbstractUpgradeServerAction {
   private static final String YARN_SITE_CONFIG_TYPE = "yarn-site";
 
   private static final String YARN_NODEMANAGER_AUX_SERVICES = "yarn.nodemanager.aux-services";
@@ -52,15 +48,12 @@ public class SparkShufflePropertyConfig extends AbstractServerAction {
   private static final String YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS = "yarn.nodemanager.aux-services.spark_shuffle.class";
   private static final String YARN_NODEMANAGER_AUX_SERVICES_SPARK_SHUFFLE_CLASS_VALUE = "org.apache.spark.network.yarn.YarnShuffleService";
 
-  @Inject
-  private Clusters clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     Config yarnSiteConfig = cluster.getDesiredConfigByType(YARN_SITE_CONFIG_TYPE);
 
     if (yarnSiteConfig == null) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredRepositoryAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredRepositoryAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredRepositoryAction.java
index 10d6630..3c6c8e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredRepositoryAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpdateDesiredRepositoryAction.java
@@ -34,7 +34,6 @@ import org.apache.ambari.server.orm.entities.HostVersionEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.serveraction.ServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.RepositoryType;
 import org.apache.ambari.server.state.RepositoryVersionState;
 import org.apache.ambari.server.state.UpgradeContext;
@@ -59,13 +58,6 @@ public class UpdateDesiredRepositoryAction extends AbstractUpgradeServerAction {
    */
   private static final Logger LOG = LoggerFactory.getLogger(UpdateDesiredRepositoryAction.class);
 
-
-  /**
-   * The Cluster that this ServerAction implementation is executing on.
-   */
-  @Inject
-  private Clusters clusters;
-
   /**
    * The Ambari configuration.
    */
@@ -86,7 +78,7 @@ public class UpdateDesiredRepositoryAction extends AbstractUpgradeServerAction {
       throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     UpgradeContext upgradeContext = getUpgradeContext(cluster);
 
     Map<String, String> roleParams = getExecutionCommand().getRoleParams();

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
index 8640600..ef145e4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptor.java
@@ -32,7 +32,6 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.orm.dao.ArtifactDAO;
 import org.apache.ambari.server.orm.entities.ArtifactEntity;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.UpgradeContext;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
@@ -60,9 +59,6 @@ public class UpgradeUserKerberosDescriptor extends AbstractUpgradeServerAction {
   private ArtifactDAO artifactDAO;
 
   @Inject
-  private Clusters clusters;
-
-  @Inject
   private AmbariMetaInfo ambariMetaInfo;
 
   @Inject
@@ -81,7 +77,7 @@ public class UpgradeUserKerberosDescriptor extends AbstractUpgradeServerAction {
       throws AmbariException, InterruptedException {
     HostRoleCommand hostRoleCommand = getHostRoleCommand();
     String clusterName = hostRoleCommand.getExecutionCommandWrapper().getExecutionCommand().getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
     List<String> messages = new ArrayList<>();
     List<String> errorMessages = new ArrayList<>();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
index d638858..c1a472b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/YarnConfigCalculation.java
@@ -24,33 +24,26 @@ import java.util.concurrent.ConcurrentMap;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
-import org.apache.ambari.server.serveraction.AbstractServerAction;
 import org.apache.ambari.server.state.Cluster;
-import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 
-import com.google.inject.Inject;
-
 /**
  * Computes Yarn properties.  This class is only used when moving from
  * HDP-2.1 to HDP-2.3 in that upgrade pack.
  */
-public class YarnConfigCalculation extends AbstractServerAction {
+public class YarnConfigCalculation extends AbstractUpgradeServerAction {
   private static final String YARN_SITE_CONFIG_TYPE = "yarn-site";
 
   private static final String YARN_RM_ZK_ADDRESS_PROPERTY_NAME = "yarn.resourcemanager.zk-address";
   private static final String HADOOP_REGISTRY_ZK_QUORUM_PROPERTY_NAME = "hadoop.registry.zk.quorum";
 
-  @Inject
-  private Clusters clusters;
-
   @Override
   public CommandReport execute(ConcurrentMap<String, Object> requestSharedDataContext)
       throws AmbariException, InterruptedException {
 
     String clusterName = getExecutionCommand().getClusterName();
 
-    Cluster cluster = clusters.getCluster(clusterName);
+    Cluster cluster = getClusters().getCluster(clusterName);
 
     Config yarnSiteConfig = cluster.getDesiredConfigByType(YARN_SITE_CONFIG_TYPE);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicyTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicyTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicyTest.java
index daf1c30..d322658 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicyTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixCapacitySchedulerOrderingPolicyTest.java
@@ -62,7 +62,7 @@ public class FixCapacitySchedulerOrderingPolicyTest {
     injector = EasyMock.createMock(Injector.class);
     clusters = EasyMock.createMock(Clusters.class);
     cluster = EasyMock.createMock(Cluster.class);
-    clustersField = FixCapacitySchedulerOrderingPolicy.class.getDeclaredField("clusters");
+    clustersField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     clustersField.setAccessible(true);
 
     expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
index fade2dd..bc42a2e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixOozieAdminUsersTest.java
@@ -78,7 +78,7 @@ public class FixOozieAdminUsersTest {
     expect(injector.getInstance(Clusters.class)).andReturn(clusters).atLeastOnce();
     replay(injector, clusters, falconEnvConfig, oozieEnvConfig);
 
-    clustersField = FixOozieAdminUsers.class.getDeclaredField("clusters");
+    clustersField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     clustersField.setAccessible(true);
 
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrlTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrlTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrlTest.java
index ffd55eb..17b23e2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrlTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/FixYarnWebServiceUrlTest.java
@@ -61,7 +61,7 @@ public class FixYarnWebServiceUrlTest {
         injector = EasyMock.createMock(Injector.class);
         clusters = EasyMock.createMock(Clusters.class);
         cluster = EasyMock.createMock(Cluster.class);
-        clustersField = FixYarnWebServiceUrl.class.getDeclaredField("clusters");
+        clustersField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
         clustersField.setAccessible(true);
 
         expect(clusters.getCluster((String) anyObject())).andReturn(cluster).anyTimes();

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
index d884d74..a5a6b15 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HBaseEnvMaxDirectMemorySizeActionTest.java
@@ -138,7 +138,7 @@ public class HBaseEnvMaxDirectMemorySizeActionTest {
 
     replay(injector, clusters, cluster, hbaseEnv);
 
-    m_clusterField = HBaseEnvMaxDirectMemorySizeAction.class.getDeclaredField("clusters");
+    m_clusterField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
index d179db0..1a5a6c8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveEnvClasspathActionTest.java
@@ -119,7 +119,7 @@ public class HiveEnvClasspathActionTest {
 
     replay(m_injector, m_clusters, cluster, hiveEnv);
 
-    m_clusterField = HiveEnvClasspathAction.class.getDeclaredField("clusters");
+    m_clusterField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
index 2f047fa..2157568 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/HiveZKQuorumConfigActionTest.java
@@ -64,7 +64,7 @@ public class HiveZKQuorumConfigActionTest {
     EasyMock.expect(m_clusters.getCluster(CLUSTER_NAME)).andReturn(m_cluster).atLeastOnce();
 
     // set the mock objects on the class under test
-    Field m_clusterField = HiveZKQuorumConfigAction.class.getDeclaredField("m_clusters");
+    Field m_clusterField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
     m_clusterField.set(m_action, m_clusters);
     m_action.setExecutionCommand(m_executionCommand);

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
index 3678aa7..8d96b06 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/KerberosKeytabsActionTest.java
@@ -26,17 +26,26 @@ import static org.junit.Assert.assertNotNull;
 import java.util.HashMap;
 import java.util.Map;
 
+import javax.persistence.EntityManager;
+
 import org.apache.ambari.server.actionmanager.ExecutionCommandWrapper;
 import org.apache.ambari.server.actionmanager.HostRoleCommand;
 import org.apache.ambari.server.actionmanager.HostRoleStatus;
 import org.apache.ambari.server.agent.CommandReport;
 import org.apache.ambari.server.agent.ExecutionCommand;
 import org.apache.ambari.server.audit.AuditLogger;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.KerberosHelper;
+import org.apache.ambari.server.orm.DBAccessor;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.stack.StackManagerFactory;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Config;
 import org.apache.ambari.server.state.SecurityType;
+import org.apache.ambari.server.state.UpgradeContextFactory;
+import org.apache.ambari.server.state.UpgradeHelper;
+import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.commons.lang.StringUtils;
 import org.easymock.EasyMock;
 import org.junit.Assert;
@@ -87,6 +96,14 @@ public class KerberosKeytabsActionTest {
         bind(Clusters.class).toInstance(m_clusters);
         bind(KerberosHelper.class).toInstance(m_kerberosHelper);
         bind(AuditLogger.class).toInstance(EasyMock.createNiceMock(AuditLogger.class));
+        bind(OsFamily.class).toInstance(EasyMock.createNiceMock(OsFamily.class));
+        bind(AmbariManagementController.class).toInstance(EasyMock.createNiceMock(AmbariManagementController.class));
+        bind(UpgradeHelper.class).toInstance(EasyMock.createNiceMock(UpgradeHelper.class));
+        bind(UpgradeContextFactory.class).toInstance(EasyMock.createNiceMock(UpgradeContextFactory.class));
+        bind(StackManagerFactory.class).toInstance(EasyMock.createNiceMock(StackManagerFactory.class));
+        bind(StackDAO.class).toInstance(EasyMock.createNiceMock(StackDAO.class));
+        bind(EntityManager.class).toInstance(EasyMock.createNiceMock(EntityManager.class));
+        bind(DBAccessor.class).toInstance(EasyMock.createNiceMock(DBAccessor.class));
       }
     });
   }

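The burst of new bindings in this test module is presumably the flip side of the base-class move: once the action inherits AbstractUpgradeServerAction, Guice has to satisfy the wider object graph the base class and its injected collaborators can reach, so each newly reachable type is bound to a nice mock. A stripped-down sketch of the pattern — the class name is illustrative and only one representative binding is shown:

  import org.apache.ambari.server.state.stack.OsFamily;
  import org.easymock.EasyMock;

  import com.google.inject.AbstractModule;
  import com.google.inject.Guice;
  import com.google.inject.Injector;

  // Hypothetical condensed version of the module configured in the test above.
  public class NiceMockTestModule extends AbstractModule {

    @Override
    protected void configure() {
      // Bind every collaborator the class under test may touch to a nice mock,
      // so injection succeeds without standing up a real server context.
      bind(OsFamily.class).toInstance(EasyMock.createNiceMock(OsFamily.class));
    }

    public static void main(String[] args) {
      Injector injector = Guice.createInjector(new NiceMockTestModule());
      System.out.println(injector.getInstance(OsFamily.class)); // the nice mock
    }
  }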
http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
index 6527c9b..7e4095d 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerConfigCalculationTest.java
@@ -87,7 +87,7 @@ public class RangerConfigCalculationTest {
 
     replay(m_injector, m_clusters, cluster, adminConfig, adminSiteConfig, rangerEnv);
 
-    m_clusterField = RangerConfigCalculation.class.getDeclaredField("m_clusters");
+    m_clusterField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
index d6a47f5..84c3587 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKerberosConfigCalculationTest.java
@@ -122,7 +122,7 @@ public class RangerKerberosConfigCalculationTest {
     replay(m_injector, m_clusters, cluster, hadoopConfig, hiveConfig, yarnConfig, hbaseConfig,
         knoxConfig, stormConfig, kafkaConfig, kmsConfig, hdfsSiteConfig, adminSiteConfig);
 
-    m_clusterField = RangerKerberosConfigCalculation.class.getDeclaredField("m_clusters");
+    m_clusterField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
index 2be8db4..34a877b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerKmsProxyConfigTest.java
@@ -76,7 +76,7 @@ public class RangerKmsProxyConfigTest {
 
     replay(m_injector, m_clusters, cluster, rangerEnv, kmsSite);
 
-    m_clusterField = RangerKmsProxyConfig.class.getDeclaredField("m_clusters");
+    m_clusterField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculationTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculationTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculationTest.java
index 427fb33..6a44b54 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculationTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerUsersyncConfigCalculationTest.java
@@ -81,7 +81,7 @@ public class RangerUsersyncConfigCalculationTest {
 
     replay(m_injector, m_clusters, cluster, rangerUsersyncConfig, rangerEnvConfig);
 
-    m_clusterField = RangerUsersyncConfigCalculation.class.getDeclaredField("m_clusters");
+    m_clusterField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     m_clusterField.setAccessible(true);
 
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java
index 362f372..7a1831c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/RangerWebAlertConfigActionTest.java
@@ -19,10 +19,12 @@
 package org.apache.ambari.server.serveraction.upgrades;
 
 
+import static org.easymock.EasyMock.anyObject;
 import static org.easymock.EasyMock.expect;
 import static org.easymock.EasyMock.replay;
 
 import java.io.File;
+import java.lang.reflect.Field;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Scanner;
@@ -47,6 +49,8 @@ import org.junit.runner.RunWith;
 import org.mockito.Mockito;
 import org.mockito.runners.MockitoJUnitRunner;
 
+import com.google.inject.Injector;
+
 import junit.framework.Assert;
 
 
@@ -64,6 +68,7 @@ public class RangerWebAlertConfigActionTest {
   private AlertDefinitionEntity alertDefinitionEntity;
   private AlertDefinitionHash alertDefinitionHash;
   private AmbariEventPublisher eventPublisher;
+  private Field clustersField;
 
 
 
@@ -75,6 +80,8 @@ public class RangerWebAlertConfigActionTest {
     eventPublisher = Mockito.mock(AmbariEventPublisher.class);
     m_clusters = Mockito.mock(Clusters.class);
     rangerWebAlertConfigAction = new RangerWebAlertConfigAction();
+    clustersField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
+    clustersField.setAccessible(true);
   }
 
   @Test
@@ -129,7 +136,7 @@ public class RangerWebAlertConfigActionTest {
     }
 
     rangerWebAlertConfigAction.alertDefinitionDAO = alertDefinitionDAO;
-    rangerWebAlertConfigAction.m_clusters = m_clusters;
+    clustersField.set(rangerWebAlertConfigAction, m_clusters);
     rangerWebAlertConfigAction.alertDefinitionHash = alertDefinitionHash;
     rangerWebAlertConfigAction.eventPublisher = eventPublisher;
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
index 518ab42..1ed95f8 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/SparkShufflePropertyConfigTest.java
@@ -72,7 +72,7 @@ public class SparkShufflePropertyConfigTest {
 
     replay(m_injector, m_clusters, yarnConfig);
 
-    clusterField = SparkShufflePropertyConfig.class.getDeclaredField("clusters");
+    clusterField = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
     clusterField.setAccessible(true);
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1032bc5d/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
index 59a8a4c..3b388e9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/UpgradeUserKerberosDescriptorTest.java
@@ -213,7 +213,7 @@ public class UpgradeUserKerberosDescriptorTest {
   }
 
   private void prepareFields() throws NoSuchFieldException {
-    String[] fieldsNames = { "artifactDAO", "clusters", "ambariMetaInfo",
+    String[] fieldsNames = { "artifactDAO", "m_clusters", "ambariMetaInfo",
         "kerberosDescriptorFactory", "m_upgradeContextFactory" };
 
     for (String fieldName : fieldsNames) {
@@ -230,7 +230,7 @@ public class UpgradeUserKerberosDescriptorTest {
   }
   private void injectFields(UpgradeUserKerberosDescriptor action) throws IllegalAccessException {
     fields.get("artifactDAO").set(action, artifactDAO);
-    fields.get("clusters").set(action, clusters);
+    fields.get("m_clusters").set(action, clusters);
     fields.get("ambariMetaInfo").set(action, ambariMetaInfo);
     fields.get("kerberosDescriptorFactory").set(action, kerberosDescriptorFactory);
     fields.get("m_upgradeContextFactory").set(action, upgradeContextFactory);


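The test-side half of the refactor is just as mechanical: every setUp() that previously reflected on a per-class clusters field (or, in RangerWebAlertConfigActionTest, assigned it directly) now targets the single base-class field. A hypothetical helper capturing the shared pattern — the class name and its existence are illustrative, not part of the patch:

  import java.lang.reflect.Field;

  import org.apache.ambari.server.serveraction.upgrades.AbstractUpgradeServerAction;
  import org.apache.ambari.server.state.Clusters;

  // Hypothetical test utility; mirrors what each setUp() above now does inline.
  final class UpgradeActionTestSupport {

    private UpgradeActionTestSupport() {
    }

    // Injects a (usually mocked) Clusters into any concrete upgrade action by
    // reflecting on the m_clusters field declared on the common base class.
    static void injectClusters(AbstractUpgradeServerAction action, Clusters clusters)
        throws NoSuchFieldException, IllegalAccessException {
      Field field = AbstractUpgradeServerAction.class.getDeclaredField("m_clusters");
      field.setAccessible(true);
      field.set(action, clusters);
    }
  }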
[19/50] [abbrv] ambari git commit: AMBARI-21208 - Upgrade PreUpgradeCheck NullPointerException (wang yaoxin via jonathanhurley)

Posted by jl...@apache.org.
AMBARI-21208 - Upgrade PreUpgradeCheck NullPointerException (wang yaoxin via jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8e7654a7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8e7654a7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8e7654a7

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 8e7654a7a338ed0151db2ee0b7827495f146e90b
Parents: b4966c1
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Oct 4 11:23:57 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Oct 4 11:23:57 2017 -0400

----------------------------------------------------------------------
 .../java/org/apache/ambari/server/state/stack/UpgradePack.java | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8e7654a7/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
index 8662958..256b71d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/UpgradePack.java
@@ -148,6 +148,9 @@ public class UpgradePack {
    * @return the preCheck name, e.g. "CheckDescription"
    */
   public List<String> getPrerequisiteChecks() {
+    if (prerequisiteChecks == null) {
+      return new ArrayList<>();
+    }
     return new ArrayList<>(prerequisiteChecks.checks);
   }
 
@@ -156,6 +159,9 @@ public class UpgradePack {
    * @return the prerequisite check configuration
    */
   public PrerequisiteCheckConfig getPrerequisiteCheckConfig() {
+    if (prerequisiteChecks == null) {
+      return new PrerequisiteCheckConfig();
+    }
     return prerequisiteChecks.configuration;
   }
 

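With the guards in place, both accessors degrade gracefully when an upgrade pack's XML omits the prerequisite-checks element, which leaves the JAXB-mapped prerequisiteChecks field null. A hypothetical regression test sketching the guaranteed behavior — it assumes UpgradePack's no-arg constructor is usable, as its JAXB mapping implies:

  import static org.junit.Assert.assertNotNull;
  import static org.junit.Assert.assertTrue;

  import org.apache.ambari.server.state.stack.UpgradePack;
  import org.junit.Test;

  // Hypothetical test; not part of the commit.
  public class UpgradePackNullChecksTest {

    @Test
    public void prerequisiteAccessorsTolerateMissingChecks() {
      // A pack parsed from XML with no prerequisite checks leaves the field
      // null; after AMBARI-21208 both accessors still return usable defaults.
      UpgradePack pack = new UpgradePack();

      assertNotNull(pack.getPrerequisiteChecks());
      assertTrue(pack.getPrerequisiteChecks().isEmpty());
      assertNotNull(pack.getPrerequisiteCheckConfig());
    }
  }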

[17/50] [abbrv] ambari git commit: AMBARI-22129 Log Search UI: reorganize classes structure. (ababiichuk)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/filter-button/filter-button.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/filter-button/filter-button.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/filter-button/filter-button.component.ts
index 1481583..2c8ecd7 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/filter-button/filter-button.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/filter-button/filter-button.component.ts
@@ -18,7 +18,7 @@
 
 import {Component, Input, forwardRef} from '@angular/core';
 import {ControlValueAccessor, NG_VALUE_ACCESSOR} from '@angular/forms';
-import {ListItem} from "@app/classes/list-item.class";
+import {ListItem} from "@app/classes/list-item";
 import {ComponentActionsService} from '@app/services/component-actions.service';
 import {UtilsService} from '@app/services/utils.service';
 import {MenuButtonComponent} from '@app/components/menu-button/menu-button.component';

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.ts
index c345c81..5eef03e 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/filters-panel/filters-panel.component.ts
@@ -20,8 +20,8 @@ import {Component} from '@angular/core';
 import {FormGroup} from '@angular/forms';
 import {Subject} from 'rxjs/Subject';
 import {TranslateService} from '@ngx-translate/core';
-import {ListItem} from '@app/classes/list-item.class';
-import {CommonEntry} from '@app/models/common-entry.model';
+import {ListItem} from '@app/classes/list-item';
+import {CommonEntry} from '@app/classes/models/common-entry';
 import {FilteringService} from '@app/services/filtering.service';
 import {LogsContainerService} from '@app/services/logs-container.service';
 import {AppStateService} from '@app/services/storage/app-state.service';

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/log-context/log-context.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/log-context/log-context.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/log-context/log-context.component.ts
index 467de98..c0411e5 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/log-context/log-context.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/log-context/log-context.component.ts
@@ -21,8 +21,8 @@ import 'rxjs/add/operator/map';
 import {LogsContainerService} from '@app/services/logs-container.service';
 import {ServiceLogsTruncatedService} from '@app/services/storage/service-logs-truncated.service';
 import {AppStateService} from '@app/services/storage/app-state.service';
-import {ServiceLog} from '@app/models/service-log.model';
-import {ServiceLogContextEntry} from '@app/classes/service-log-context-entry.class';
+import {ServiceLog} from '@app/classes/models/service-log';
+import {ServiceLogContextEntry} from '@app/classes/service-log-context-entry';
 
 @Component({
   selector: 'log-context',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts
index 7345288..63fafb6 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-container/logs-container.component.ts
@@ -24,11 +24,11 @@ import {FilteringService} from '@app/services/filtering.service';
 import {LogsContainerService} from '@app/services/logs-container.service';
 import {ServiceLogsHistogramDataService} from '@app/services/storage/service-logs-histogram-data.service';
 import {AppStateService} from '@app/services/storage/app-state.service';
-import {AuditLog} from '@app/models/audit-log.model';
-import {ServiceLog} from '@app/models/service-log.model';
-import {LogField} from '@app/models/log-field.model';
-import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry.class';
-import {HistogramOptions} from '@app/classes/histogram-options.class';
+import {AuditLog} from '@app/classes/models/audit-log';
+import {ServiceLog} from '@app/classes/models/service-log';
+import {LogField} from '@app/classes/models/log-field';
+import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry';
+import {HistogramOptions} from '@app/classes/histogram-options';
 
 @Component({
   selector: 'logs-container',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-list/logs-list.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-list/logs-list.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-list/logs-list.component.ts
index c94b967..2462a61 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-list/logs-list.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/logs-list/logs-list.component.ts
@@ -21,8 +21,8 @@ import 'rxjs/add/operator/map';
 import {AppStateService} from '@app/services/storage/app-state.service';
 import {FilteringService} from '@app/services/filtering.service';
 import {UtilsService} from '@app/services/utils.service';
-import {AuditLog} from '@app/models/audit-log.model';
-import {ServiceLog} from '@app/models/service-log.model';
+import {AuditLog} from '@app/classes/models/audit-log';
+import {ServiceLog} from '@app/classes/models/service-log';
 
 @Component({
   selector: 'logs-list',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/main-container/main-container.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/main-container/main-container.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/main-container/main-container.component.ts
index 32fe1cf..ad86a74 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/main-container/main-container.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/main-container/main-container.component.ts
@@ -21,9 +21,9 @@ import {HttpClientService} from '@app/services/http-client.service';
 import {AppStateService} from '@app/services/storage/app-state.service';
 import {AuditLogsFieldsService} from '@app/services/storage/audit-logs-fields.service';
 import {ServiceLogsFieldsService} from '@app/services/storage/service-logs-fields.service';
-import {AuditLogField} from '@app/models/audit-log-field.model';
-import {ServiceLogField} from '@app/models/service-log-field.model';
-import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry.class';
+import {AuditLogField} from '@app/classes/models/audit-log-field';
+import {ServiceLogField} from '@app/classes/models/service-log-field';
+import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry';
 
 @Component({
   selector: 'main-container',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/menu-button/menu-button.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/menu-button/menu-button.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/menu-button/menu-button.component.ts
index 7e347e6..3bac984 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/menu-button/menu-button.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/menu-button/menu-button.component.ts
@@ -17,7 +17,7 @@
  */
 
 import {Component, Input, ViewChild, ElementRef} from '@angular/core';
-import {ListItem} from '@app/classes/list-item.class';
+import {ListItem} from '@app/classes/list-item';
 import {ComponentActionsService} from '@app/services/component-actions.service';
 import * as $ from 'jquery';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/search-box/search-box.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/search-box/search-box.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/search-box/search-box.component.ts
index e547a62..5520310 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/search-box/search-box.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/search-box/search-box.component.ts
@@ -19,7 +19,7 @@
 import {Component, OnInit, OnDestroy, Input, ViewChild, ElementRef, forwardRef} from '@angular/core';
 import {ControlValueAccessor, NG_VALUE_ACCESSOR} from '@angular/forms';
 import {Subject} from 'rxjs/Subject';
-import {CommonEntry} from '@app/models/common-entry.model';
+import {CommonEntry} from '@app/classes/models/common-entry';
 import {UtilsService} from '@app/services/utils.service';
 
 @Component({

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts b/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts
index c3ec388..e255166 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/components/time-histogram/time-histogram.component.ts
@@ -21,7 +21,7 @@ import {ContainerElement, Selection} from 'd3';
 import * as d3 from 'd3';
 import * as moment from 'moment-timezone';
 import {AppSettingsService} from '@app/services/storage/app-settings.service';
-import {HistogramStyleOptions, HistogramOptions} from '@app/classes/histogram-options.class';
+import {HistogramStyleOptions, HistogramOptions} from '@app/classes/histogram-options';
 
 @Component({
   selector: 'time-histogram',

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/app-settings.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/app-settings.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/app-settings.model.ts
deleted file mode 100644
index 11821a3..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/app-settings.model.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import * as moment from 'moment-timezone';
-
-export interface AppSettings {
-  timeZone: string;
-}
-
-export const defaultSettings: AppSettings = {
-  timeZone: moment.tz.guess()
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/app-state.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/app-state.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/app-state.model.ts
deleted file mode 100644
index 267bf15..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/app-state.model.ts
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry.class';
-
-export interface AppState {
-  isAuthorized: boolean;
-  isInitialLoading: boolean;
-  isLoginInProgress: boolean;
-  isAuditLogsSet: boolean;
-  isServiceLogsSet: boolean;
-  activeLogsType?: string;
-  isServiceLogsFileView: boolean;
-  isServiceLogContextView: boolean;
-  activeLog: ActiveServiceLogEntry | null;
-}
-
-export const initialState: AppState = {
-  isAuthorized: false,
-  isInitialLoading: false,
-  isLoginInProgress: false,
-  isAuditLogsSet: false,
-  isServiceLogsSet: false,
-  activeLogsType: 'serviceLogs', // TODO implement setting the parameter depending on user's navigation
-  isServiceLogsFileView: false,
-  isServiceLogContextView: false,
-  activeLog: null
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/audit-log-field.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/audit-log-field.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/audit-log-field.model.ts
deleted file mode 100644
index 96372a1..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/audit-log-field.model.ts
+++ /dev/null
@@ -1,225 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {LogField} from '@app/models/log-field.model';
-
-const columnsNamesMap = {
-  access: {
-    displayName: 'logs.accessType',
-    isDisplayed: true
-  },
-  action: {
-    displayName: 'logs.action'
-  },
-  agent: {
-    displayName: 'logs.agent'
-  },
-  agentHost: {
-    displayName: 'logs.agentHost'
-  },
-  authType: {
-    displayName: 'logs.authType'
-  },
-  bundle_id: {
-    displayName: 'logs.bundleId'
-  },
-  case_id: {
-    displayName: 'logs.caseId'
-  },
-  cliIP: {
-    displayName: 'logs.clientIp',
-    isDisplayed: true
-  },
-  cliType: {
-    displayName: 'logs.clientType'
-  },
-  cluster: {
-    displayName: 'logs.cluster'
-  },
-  dst: {
-    displayName: 'logs.dst'
-  },
-  evtTime: {
-    displayName: 'logs.eventTime',
-    isDisplayed: true
-  },
-  file: {
-    displayName: 'logs.file'
-  },
-  host: {
-    displayName: 'logs.host'
-  },
-  id: {
-    displayName: 'logs.id'
-  },
-  ip: {
-    displayName: 'logs.ip'
-  },
-  level: {
-    displayName: 'logs.level'
-  },
-  log_message: {
-    displayName: 'logs.message'
-  },
-  logType: {
-    displayName: 'logs.logType'
-  },
-  logfile_line_number: {
-    displayName: 'logs.logfileLineNumber'
-  },
-  logger_name: {
-    displayName: 'logs.loggerName'
-  },
-  logtime: {
-    displayName: 'logs.logTime'
-  },
-  path: {
-    displayName: 'logs.path'
-  },
-  perm: {
-    displayName: 'logs.perm'
-  },
-  policy: {
-    displayName: 'logs.policy'
-  },
-  proxyUsers: {
-    displayName: 'logs.proxyUsers'
-  },
-  reason: {
-    displayName: 'logs.reason'
-  },
-  repo: {
-    displayName: 'logs.repo',
-    isDisplayed: true
-  },
-  repoType: {
-    displayName: 'logs.repoType'
-  },
-  req_caller_id: {
-    displayName: 'logs.reqCallerId'
-  },
-  reqContext: {
-    displayName: 'logs.reqContext'
-  },
-  reqData: {
-    displayName: 'logs.reqData'
-  },
-  req_self_id: {
-    displayName: 'logs.reqSelfId'
-  },
-  resType: {
-    displayName: 'logs.resType'
-  },
-  resource: {
-    displayName: 'logs.resource',
-    isDisplayed: true
-  },
-  result: {
-    displayName: 'logs.result',
-    isDisplayed: true
-  },
-  sess: {
-    displayName: 'logs.session'
-  },
-  text: {
-    displayName: 'logs.text'
-  },
-  type: {
-    displayName: 'logs.type'
-  },
-  ugi: {
-    displayName: 'logs.ugi'
-  },
-  reqUser: {
-    displayName: 'logs.user',
-    isDisplayed: true
-  },
-  ws_base_url: {
-    displayName: 'logs.baseUrl'
-  },
-  ws_command: {
-    displayName: 'logs.command'
-  },
-  ws_component: {
-    displayName: 'logs.component'
-  },
-  ws_details: {
-    displayName: 'logs.details'
-  },
-  ws_display_name: {
-    displayName: 'logs.displayName'
-  },
-  ws_os: {
-    displayName: 'logs.os'
-  },
-  ws_repo_id: {
-    displayName: 'logs.repoId'
-  },
-  ws_repo_version: {
-    displayName: 'logs.repoVersion'
-  },
-  ws_repositories: {
-    displayName: 'logs.repositories'
-  },
-  ws_request_id: {
-    displayName: 'logs.requestId'
-  },
-  ws_result_status: {
-    displayName: 'logs.resultStatus'
-  },
-  ws_roles: {
-    displayName: 'logs.roles'
-  },
-  ws_stack_version: {
-    displayName: 'logs.stackVersion'
-  },
-  ws_stack: {
-    displayName: 'logs.stack'
-  },
-  ws_status: {
-    displayName: 'logs.status'
-  },
-  ws_task_id: {
-    displayName: 'logs.taskId'
-  },
-  ws_version_note: {
-    displayName: 'logs.versionNote'
-  },
-  ws_version_number: {
-    displayName: 'logs.versionNumber'
-  },
-  tags: {
-    isAvailable: false
-  },
-  tags_str: {
-    isAvailable: false
-  },
-  seq_num: {
-    isAvailable: false
-  }
-};
-
-export class AuditLogField extends LogField {
-  constructor(name: string) {
-    super(name);
-    const preset = columnsNamesMap[this.name];
-    if (preset) {
-      Object.assign(this, preset);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/audit-log.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/audit-log.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/audit-log.model.ts
deleted file mode 100644
index 2b34cd6..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/audit-log.model.ts
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {Log} from '@app/models/log.model';
-
-export interface AuditLog extends Log {
-  policy?: string;
-  reason?: string;
-  result: number;
-  text?: string;
-  tags?: string[];
-  resource?: string;
-  sess?: string;
-  access?: string;
-  logType: string;
-  tags_str?: string;
-  resType?: string;
-  reqUser: string;
-  reqData?: string;
-  repoType: number;
-  repo: string;
-  proxyUsers?: string[];
-  evtTime: string;
-  enforcer: string;
-  reqContext?: string;
-  cliType?: string;
-  cliIP?: string;
-  agent?: string;
-  agentHost?: string;
-  action?: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/bar-graph.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/bar-graph.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/bar-graph.model.ts
deleted file mode 100644
index 6c9a049..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/bar-graph.model.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {CommonEntry} from '@app/models/common-entry.model';
-
-export interface BarGraph {
-  dataCount: CommonEntry[];
-  name: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/common-entry.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/common-entry.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/common-entry.model.ts
deleted file mode 100644
index dad82ab..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/common-entry.model.ts
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface CommonEntry {
-  name: string;
-  value: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/count.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/count.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/count.model.ts
deleted file mode 100644
index 02fc41c..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/count.model.ts
+++ /dev/null
@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface Count {
-  name: string;
-  count: number;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/filter.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/filter.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/filter.model.ts
deleted file mode 100644
index c7ff662..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/filter.model.ts
+++ /dev/null
@@ -1,25 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface Filter {
-  label: string;
-  hosts: string[];
-  defaultLevels: string[];
-  overrideLevels: string[];
-  expiryTime: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/graph.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/graph.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/graph.model.ts
deleted file mode 100644
index be31f19..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/graph.model.ts
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface Graph {
-  name: string;
-  count: string;
-  dataList?: Graph[];
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/log-field.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/log-field.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/log-field.model.ts
deleted file mode 100644
index 0e738ab..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/log-field.model.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export class LogField {
-  constructor(name: string) {
-    this.name = name;
-  }
-  name: string;
-  displayName: string = this.name;
-  isDisplayed: boolean = false;
-  isAvailable: boolean = true;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/log.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/log.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/log.model.ts
deleted file mode 100644
index c598e41..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/log.model.ts
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface Log {
-  type: string;
-  _version_: number;
-  id: string;
-  file?: string;
-  seq_num: number;
-  bundle_id?: string;
-  case_id?: string;
-  log_message: string;
-  logfile_line_number: number;
-  line_number?: number;
-  message_md5: string;
-  cluster: string;
-  event_count: number;
-  event_md5: string;
-  event_dur_ms: number;
-  _ttl_: string;
-  _expire_at_: number;
-  _router_field_?: number;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts
deleted file mode 100644
index b01421e..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/node.model.ts
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {CommonEntry} from '@app/models/common-entry.model';
-
-export interface Node {
-  name: string;
-  type?: string;
-  value: string;
-  isParent: boolean;
-  isRoot: boolean;
-  childs?: Node[];
-  logLevelCount?: CommonEntry[];
-  vNodeList?: CommonEntry[];
-}
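
Of the models deleted in this commit, Node is the only recursive one: childs nests further Node objects of the same shape, while logLevelCount and vNodeList reuse the name/value pairs from common-entry.model.ts above. A small sketch of walking such a tree; the flattenNodes helper is ours for illustration, not something from the repo:

interface CommonEntry {
  name: string;
  value: string;
}

interface Node {
  name: string;
  type?: string;
  value: string;
  isParent: boolean;
  isRoot: boolean;
  childs?: Node[];
  logLevelCount?: CommonEntry[];
  vNodeList?: CommonEntry[];
}

// Depth-first flatten: each parent is followed by all of its descendants.
function flattenNodes(nodes: Node[]): Node[] {
  return nodes.reduce<Node[]>(
    (acc, node) => acc.concat(node, node.childs ? flattenNodes(node.childs) : []),
    []
  );
}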

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/service-log-field.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/service-log-field.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/service-log-field.model.ts
deleted file mode 100644
index 081eecf..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/service-log-field.model.ts
+++ /dev/null
@@ -1,107 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {LogField} from '@app/models/log-field.model';
-
-const columnsNamesMap = {
-  log_message: {
-    displayName: 'logs.message',
-    isDisplayed: true
-  },
-  bundle_id: {
-    displayName: 'logs.bundleId'
-  },
-  case_id: {
-    displayName: 'logs.caseId'
-  },
-  cluster: {
-    displayName: 'logs.cluster'
-  },
-  event_count: {
-    displayName: 'logs.eventCount'
-  },
-  file: {
-    displayName: 'logs.file'
-  },
-  host: {
-    displayName: 'logs.host'
-  },
-  id: {
-    displayName: 'logs.id'
-  },
-  ip: {
-    displayName: 'logs.ip'
-  },
-  level: {
-    displayName: 'logs.level',
-    isDisplayed: true
-  },
-  line_number: {
-    displayName: 'logs.lineNumber'
-  },
-  logtype: {
-    displayName: 'logs.logType'
-  },
-  logfile_line_number: {
-    displayName: 'logs.logfileLineNumber'
-  },
-  logger_name: {
-    displayName: 'logs.loggerName'
-  },
-  logtime: {
-    isDisplayed: true
-  },
-  method: {
-    displayName: 'logs.method'
-  },
-  path: {
-    displayName: 'logs.path'
-  },
-  rowtype: {
-    displayName: 'logs.rowType'
-  },
-  thread_name: {
-    displayName: 'logs.threadName'
-  },
-  type: {
-    displayName: 'logs.type',
-    isDisplayed: true
-  },
-  tags: {
-    isAvailable: false
-  },
-  text: {
-    isAvailable: false
-  },
-  message: {
-    isAvailable: false
-  },
-  seq_num: {
-    isAvailable: false
-  }
-};
-
-export class ServiceLogField extends LogField {
-  constructor(name: string) {
-    super(name);
-    const preset = columnsNamesMap[this.name];
-    if (preset) {
-      Object.assign(this, preset);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/service-log.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/service-log.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/service-log.model.ts
deleted file mode 100644
index ee27343..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/service-log.model.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {Log} from '@app/models/log.model';
-
-export interface ServiceLog extends Log {
-  path: string;
-  host: string;
-  level: string;
-  logtime: number;
-  ip: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/solr-collection-state.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/solr-collection-state.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/solr-collection-state.model.ts
deleted file mode 100644
index 0824dda..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/solr-collection-state.model.ts
+++ /dev/null
@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface SolrCollectionState {
-  znodeReady: boolean;
-  configurationUploaded: boolean;
-  solrCollectionReady: boolean;
-}
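
SolrCollectionState above is a plain readiness checklist for the Log Search Solr backend. A caller that needs a single flag can reduce it to the conjunction of the three steps; a one-line sketch (the isSolrReady helper is hypothetical):

interface SolrCollectionState {
  znodeReady: boolean;
  configurationUploaded: boolean;
  solrCollectionReady: boolean;
}

const isSolrReady = (s: SolrCollectionState): boolean =>
  s.znodeReady && s.configurationUploaded && s.solrCollectionReady;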

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts
deleted file mode 100644
index 518e7cd..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/store.model.ts
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import {Observable} from 'rxjs/Observable';
-import {Store, Action} from '@ngrx/store';
-import {AppSettings} from '@app/models/app-settings.model';
-import {AppState} from '@app/models/app-state.model';
-import {AuditLog} from '@app/models/audit-log.model';
-import {ServiceLog} from '@app/models/service-log.model';
-import {BarGraph} from '@app/models/bar-graph.model';
-import {Graph} from '@app/models/graph.model';
-import {Node} from '@app/models/node.model';
-import {UserConfig} from '@app/models/user-config.model';
-import {Filter} from '@app/models/filter.model';
-import {AuditLogField} from '@app/models/audit-log-field.model';
-import {ServiceLogField} from '@app/models/service-log-field.model';
-
-export const storeActions = {
-  'ARRAY.ADD': 'ADD',
-  'ARRAY.ADD.START': 'ADD_TO_START',
-  'ARRAY.DELETE.PRIMITIVE': 'DELETE_PRIMITIVE',
-  'ARRAY.DELETE.OBJECT': 'DELETE_OBJECT',
-  'ARRAY.CLEAR': 'CLEAR',
-  'ARRAY.MAP': 'MAP',
-
-  'OBJECT.SET': 'SET'
-};
-
-export interface AppStore {
-  appSettings: AppSettings;
-  appState: AppState;
-  auditLogs: AuditLog[];
-  serviceLogs: ServiceLog[];
-  serviceLogsHistogramData: BarGraph[];
-  serviceLogsTruncated: ServiceLog[];
-  graphs: Graph[];
-  hosts: Node[];
-  userConfigs: UserConfig[];
-  filters: Filter[];
-  clusters: string[];
-  components: Node[];
-  serviceLogsFields: ServiceLogField[];
-  auditLogsFields: AuditLogField[];
-}
-
-export class ModelService {
-
-  constructor(modelName: string, store: Store<AppStore>) {
-    this.modelName = modelName;
-    this.store = store;
-  }
-
-  protected modelName: string;
-
-  protected store: Store<AppStore>;
-
-  getAll(): Observable<any> {
-    return this.store.select(this.modelName);
-  }
-
-}
-
-export class CollectionModelService extends ModelService {
-
-  addInstance(instance: any): void {
-    this.addInstances([instance]);
-  }
-
-  addInstances(instances: any[]): void {
-    this.store.dispatch({
-      type: `${storeActions['ARRAY.ADD']}_${this.modelName}`,
-      payload: instances
-    });
-  }
-
-  addInstancesToStart(instances: any[]): void {
-    this.store.dispatch({
-      type: `${storeActions['ARRAY.ADD.START']}_${this.modelName}`,
-      payload: instances
-    });
-  }
-
-  deleteObjectInstance(instance: any): void {
-    this.store.dispatch({
-      type: `${storeActions['ARRAY.DELETE.OBJECT']}_${this.modelName}`,
-      payload: instance
-    });
-  }
-
-  deletePrimitiveInstance(instance: any): void {
-    this.store.dispatch({
-      type: `${storeActions['ARRAY.DELETE.PRIMITIVE']}_${this.modelName}`,
-      payload: instance
-    });
-  }
-
-  clear(): void {
-    this.store.dispatch({
-      type: `${storeActions['ARRAY.CLEAR']}_${this.modelName}`
-    });
-  }
-
-  mapCollection(modifier: (item: any) => {}): void {
-    this.store.dispatch({
-      type: `${storeActions['ARRAY.MAP']}_${this.modelName}`,
-      payload: {
-        modifier: modifier
-      }
-    });
-  }
-
-}
-
-export class ObjectModelService extends ModelService {
-
-  getParameter(key: string): Observable<any> {
-    return this.store.select(this.modelName, key);
-  }
-
-  setParameter(key: string, value: any): void {
-    let payload = {};
-    payload[key] = value;
-    this.setParameters(payload);
-  }
-
-  setParameters(params: any): void {
-    this.store.dispatch({
-      type: `${storeActions['OBJECT.SET']}_${this.modelName}`,
-      payload: params
-    });
-  }
-
-}
-
-export function getCollectionReducer(modelName: string, defaultState: any = []): any {
-  return (state: any = defaultState, action: Action) => {
-    switch (action.type) {
-      case `${storeActions['ARRAY.ADD']}_${modelName}`:
-        return [...state, ...action.payload];
-      case `${storeActions['ARRAY.ADD.START']}_${modelName}`:
-        return [...action.payload, ...state];
-      case `${storeActions['ARRAY.DELETE.OBJECT']}_${modelName}`:
-        return state.filter(instance => instance.id !== action.payload.id);
-      case `${storeActions['ARRAY.DELETE.PRIMITIVE']}_${modelName}`:
-        return state.filter(item => item !== action.payload);
-      case `${storeActions['ARRAY.CLEAR']}_${modelName}`:
-        return [];
-      case `${storeActions['ARRAY.MAP']}_${modelName}`:
-        return state.map(action.payload.modifier);
-      default:
-        return state;
-    }
-  };
-}
-
-export function getObjectReducer(modelName: string, defaultState: any = {}) {
-  return (state: any = defaultState, action: Action): any => {
-    switch (action.type) {
-      case `${storeActions['OBJECT.SET']}_${modelName}`:
-        return Object.assign({}, state, action.payload);
-      default:
-        return state;
-    }
-  };
-}
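
The store model deleted above is worth a short gloss, because every storage service touched below is a thin wrapper around it: each collection in AppStore gets its reducer from getCollectionReducer, and action types are a generic verb from storeActions prefixed onto the model name, so adding service logs dispatches ADD_serviceLogs while clearing clusters dispatches CLEAR_clusters. A reduced sketch of that convention with the @ngrx/store wiring simplified away (the Action type here is a stand-in, not the real ngrx one):

type Action = {type: string, payload?: any};

function getCollectionReducer<T>(modelName: string, defaultState: T[] = []) {
  return (state: T[] = defaultState, action: Action): T[] => {
    switch (action.type) {
      case `ADD_${modelName}`:   // storeActions['ARRAY.ADD']
        return [...state, ...action.payload];
      case `CLEAR_${modelName}`: // storeActions['ARRAY.CLEAR']
        return [];
      default:
        return state;
    }
  };
}

// Each model name yields an independent reducer over the same verbs;
// CollectionModelService.addInstances() boils down to a dispatch like the first call.
const serviceLogs = getCollectionReducer<{id: string}>('serviceLogs');
let state = serviceLogs(undefined, {type: 'ADD_serviceLogs', payload: [{id: '1'}]});
state = serviceLogs(state, {type: 'CLEAR_serviceLogs'}); // back to []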

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/models/user-config.model.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/models/user-config.model.ts b/ambari-logsearch/ambari-logsearch-web/src/app/models/user-config.model.ts
deleted file mode 100644
index f52761c..0000000
--- a/ambari-logsearch/ambari-logsearch-web/src/app/models/user-config.model.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export interface UserConfig {
-  id: string;
-  userName: string;
-  filtername: string;
-  values: string;
-  shareNameList: string[];
-  rowType: string;
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/component-actions.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/component-actions.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/component-actions.service.ts
index b3ff0b0..19b873c 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/component-actions.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/component-actions.service.ts
@@ -19,10 +19,10 @@
 import {Injectable} from '@angular/core';
 import {AppSettingsService} from '@app/services/storage/app-settings.service';
 import {AppStateService} from '@app/services/storage/app-state.service';
-import {CollectionModelService} from '@app/models/store.model';
+import {CollectionModelService} from '@app/classes/models/store';
 import {FilteringService} from '@app/services/filtering.service';
 import {LogsContainerService} from '@app/services/logs-container.service';
-import {ServiceLog} from '@app/models/service-log.model';
+import {ServiceLog} from '@app/classes/models/service-log';
 
 @Injectable()
 export class ComponentActionsService {
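
The rest of this commit repeats the same mechanical substitution across the services that follow: the models move from src/app/models/*.model.ts to src/app/classes/models/*, and the .model/.class filename suffixes are dropped. For illustration, the one-line change looks like this (the '@app' prefix is assumed to be a tsconfig "paths" alias for src/app, the usual Angular CLI arrangement; the alias itself is not part of this diff):

// before: import {Node} from '@app/models/node.model';
// before: import {ListItem} from '@app/classes/list-item.class';
import {Node} from '@app/classes/models/node';
import {ListItem} from '@app/classes/list-item';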

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts
index c4db041..5d79902 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.spec.ts
@@ -24,8 +24,8 @@ import {ComponentsService, components} from '@app/services/storage/components.se
 import {HostsService, hosts} from '@app/services/storage/hosts.service';
 import {UtilsService} from '@app/services/utils.service';
 import {HttpClientService} from '@app/services/http-client.service';
-import {ListItem} from '@app/classes/list-item.class';
-import {Node} from '@app/models/node.model';
+import {ListItem} from '@app/classes/list-item';
+import {Node} from '@app/classes/models/node';
 
 import {FilteringService} from './filtering.service';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
index 7fe6517..9e3a7d2 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/filtering.service.ts
@@ -24,8 +24,8 @@ import {Observable} from 'rxjs/Observable';
 import 'rxjs/add/observable/timer';
 import 'rxjs/add/operator/takeUntil';
 import * as moment from 'moment-timezone';
-import {ListItem} from '@app/classes/list-item.class';
-import {Node} from '@app/models/node.model';
+import {ListItem} from '@app/classes/list-item';
+import {Node} from '@app/classes/models/node';
 import {AppSettingsService} from '@app/services/storage/app-settings.service';
 import {ClustersService} from '@app/services/storage/clusters.service';
 import {ComponentsService} from '@app/services/storage/components.service';

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts
index 495f706..9b61bf6 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/http-client.service.ts
@@ -20,10 +20,10 @@ import {Injectable} from '@angular/core';
 import {Observable} from 'rxjs/Observable';
 import 'rxjs/add/operator/first';
 import {Http, XHRBackend, Request, RequestOptions, RequestOptionsArgs, Response, Headers, URLSearchParams} from '@angular/http';
-import {AuditLogsQueryParams} from '@app/classes/queries/audit-logs-query-params.class';
-import {ServiceLogsQueryParams} from '@app/classes/queries/service-logs-query-params.class';
-import {ServiceLogsHistogramQueryParams} from '@app/classes/queries/service-logs-histogram-query-params.class';
-import {ServiceLogsTruncatedQueryParams} from '@app/classes/queries/service-logs-truncated-query-params.class';
+import {AuditLogsQueryParams} from '@app/classes/queries/audit-logs-query-params';
+import {ServiceLogsQueryParams} from '@app/classes/queries/service-logs-query-params';
+import {ServiceLogsHistogramQueryParams} from '@app/classes/queries/service-logs-histogram-query-params';
+import {ServiceLogsTruncatedQueryParams} from '@app/classes/queries/service-logs-truncated-query-params';
 import {AppStateService} from '@app/services/storage/app-state.service';
 
 @Injectable()

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/logs-container.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/logs-container.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/logs-container.service.ts
index 14e9ad4..0319262 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/logs-container.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/logs-container.service.ts
@@ -26,7 +26,7 @@ import {ServiceLogsFieldsService} from '@app/services/storage/service-logs-field
 import {ServiceLogsHistogramDataService} from '@app/services/storage/service-logs-histogram-data.service';
 import {ServiceLogsTruncatedService} from '@app/services/storage/service-logs-truncated.service';
 import {AppStateService} from '@app/services/storage/app-state.service';
-import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry.class';
+import {ActiveServiceLogEntry} from '@app/classes/active-service-log-entry';
 
 @Injectable()
 export class LogsContainerService {

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-settings.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-settings.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-settings.service.ts
index 6de9988..cec2656 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-settings.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-settings.service.ts
@@ -18,8 +18,8 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {defaultSettings} from '@app/models/app-settings.model';
-import {AppStore, ObjectModelService, getObjectReducer} from '@app/models/store.model';
+import {defaultSettings} from '@app/classes/models/app-settings';
+import {AppStore, ObjectModelService, getObjectReducer} from '@app/classes/models/store';
 
 export const modelName = 'appSettings';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-state.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-state.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-state.service.ts
index d77d80f..df773fc 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-state.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/app-state.service.ts
@@ -18,8 +18,8 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {initialState} from '@app/models/app-state.model';
-import {AppStore, ObjectModelService, getObjectReducer} from '@app/models/store.model';
+import {initialState} from '@app/classes/models/app-state';
+import {AppStore, ObjectModelService, getObjectReducer} from '@app/classes/models/store';
 
 export const modelName = 'appState';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs-fields.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs-fields.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs-fields.service.ts
index bb8c661..0950de6 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs-fields.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs-fields.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'auditLogsFields';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs.service.ts
index bc33bd9..a467fc9 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/audit-logs.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'auditLogs';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/clusters.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/clusters.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/clusters.service.ts
index f21a8f9..35a07be 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/clusters.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/clusters.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'clusters';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/components.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/components.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/components.service.ts
index 6b2a0ba..1432f6a 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/components.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/components.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'components';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/filters.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/filters.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/filters.service.ts
index b850006..493e2e6 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/filters.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/filters.service.ts
@@ -19,7 +19,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'filters';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/graphs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/graphs.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/graphs.service.ts
index e541444..8a5bb2b 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/graphs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/graphs.service.ts
@@ -19,7 +19,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'graphs';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/hosts.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/hosts.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/hosts.service.ts
index 0cb0a74..acf7dda 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/hosts.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/hosts.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'hosts';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-fields.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-fields.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-fields.service.ts
index 0082cd6..1440d8d 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-fields.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-fields.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'serviceLogsFields';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-histogram-data.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-histogram-data.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-histogram-data.service.ts
index e680777..91ee94a 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-histogram-data.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-histogram-data.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'serviceLogsHistogramData';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-truncated.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-truncated.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-truncated.service.ts
index f8fe0f7..53b73ba 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-truncated.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs-truncated.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'serviceLogsTruncated';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs.service.ts
index f0ff0d7..0f4fa35 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/service-logs.service.ts
@@ -18,7 +18,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'serviceLogs';
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b4966c10/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/user-configs.service.ts
----------------------------------------------------------------------
diff --git a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/user-configs.service.ts b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/user-configs.service.ts
index 1596e78..3b6bb15 100644
--- a/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/user-configs.service.ts
+++ b/ambari-logsearch/ambari-logsearch-web/src/app/services/storage/user-configs.service.ts
@@ -19,7 +19,7 @@
 
 import {Injectable} from '@angular/core';
 import {Store} from '@ngrx/store';
-import {AppStore, CollectionModelService, getCollectionReducer} from '@app/models/store.model';
+import {AppStore, CollectionModelService, getCollectionReducer} from '@app/classes/models/store';
 
 export const modelName = 'userConfigs';
 


[22/50] [abbrv] ambari git commit: AMBARI-22133. Repositories With No Components On Them Are Shown as Upgradeable (alexantonenko)

Posted by jl...@apache.org.
AMBARI-22133. Repositories With No Components On Them Are Shown as Upgradeable (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d1ba2298
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d1ba2298
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d1ba2298

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: d1ba22989ad81b252f2103f3ae3e674ce56bc2ea
Parents: 158bd65
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Wed Oct 4 21:19:52 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Wed Oct 4 21:19:52 2017 +0300

----------------------------------------------------------------------
 .../stack_upgrade/upgrade_version_box_view.js   | 42 +++++++----
 .../upgrade_version_box_view_test.js            | 77 +++++++++++---------
 2 files changed, 72 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d1ba2298/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
index 81049e9..28f4f32 100644
--- a/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
+++ b/ambari-web/app/views/main/admin/stack_upgrade/upgrade_version_box_view.js
@@ -276,28 +276,44 @@ App.UpgradeVersionBoxView = Em.View.extend({
           break;
         default:
           var isVersionColumnView = this.get('isVersionColumnView');
-          element.set('isButtonGroup', true);
-          element.set('text', isVersionColumnView ? Em.I18n.t('common.upgrade') : Em.I18n.t('admin.stackVersions.version.performUpgrade'));
-          element.set('action', 'confirmUpgrade');
-          element.get('buttons').pushObject({
-            text: isVersionColumnView ? Em.I18n.t('common.reinstall') : Em.I18n.t('admin.stackVersions.version.reinstall'),
-            action: 'installRepoVersionPopup',
-            isDisabled: isDisabled
+          var stackServices = this.get('content.stackServices');
+          var isUpgradable = stackServices && stackServices.some(function (stackService) {
+              return stackService.get('isUpgradable');
           });
+          var isPatch = this.get('content.isPatch');
+          var isMaint = this.get('content.isMaint');
 
-          element.get('buttons').pushObject({
-            text: Em.I18n.t('admin.stackVersions.version.preUpgradeCheck'),
-            action: 'showUpgradeOptions',
-            isDisabled: isDisabled
-          });
+          element.set('isButtonGroup', true);
+          if (isUpgradable) {
+            element.set('text', isVersionColumnView ? Em.I18n.t('common.upgrade') : Em.I18n.t('admin.stackVersions.version.performUpgrade'));
+            element.set('action', 'confirmUpgrade');
+            element.get('buttons').pushObject({
+              text: isVersionColumnView ? Em.I18n.t('common.reinstall') : Em.I18n.t('admin.stackVersions.version.reinstall'),
+              action: 'installRepoVersionPopup',
+              isDisabled: isDisabled
+            });
 
-          if (this.get('content.isPatch') || this.get('content.isMaint')) {
+            element.get('buttons').pushObject({
+              text: Em.I18n.t('admin.stackVersions.version.preUpgradeCheck'),
+              action: 'showUpgradeOptions',
+              isDisabled: isDisabled
+            });
+          }
+          else {
+            element.set('iconClass', 'icon-ok');
+            element.set('text', Em.I18n.t('common.installed'));
+          }
+
+          if (isPatch || isMaint) {
             element.get('buttons').pushObject({
               text: Em.I18n.t('common.hide'),
               action: 'confirmDiscardRepoVersion',
               isDisabled: isDisabled
             });
           }
+
+
+
       }
       element.set('isDisabled', isDisabled);
     }
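
The hunk above is compact but carries the whole fix for AMBARI-22133: the Perform Upgrade, Reinstall and Pre-Upgrade Check buttons are now built only when at least one of the version's stack services reports isUpgradable; otherwise the box degrades to an "Installed" label with a check icon, and only the Hide action for patch/maint versions survives in both branches. A condensed sketch of the decision with plain objects standing in for the Ember view (buildPreUpgradeElement is our name, not the view's):

interface StackService { isUpgradable: boolean; }

interface RepoVersion {
  stackServices: StackService[];
  isPatch: boolean;
  isMaint: boolean;
}

interface Button { text: string; action: string; }

function buildPreUpgradeElement(v: RepoVersion): {text: string, buttons: Button[]} {
  const isUpgradable = v.stackServices.some(s => s.isUpgradable);
  const buttons: Button[] = [];
  let text = 'Installed';
  if (isUpgradable) {
    text = 'Perform Upgrade';
    buttons.push({text: 'Reinstall Packages', action: 'installRepoVersionPopup'});
    buttons.push({text: 'Pre-Upgrade Check', action: 'showUpgradeOptions'});
  }
  if (v.isPatch || v.isMaint) {
    buttons.push({text: 'Hide', action: 'confirmDiscardRepoVersion'});
  }
  return {text: text, buttons: buttons};
}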

http://git-wip-us.apache.org/repos/asf/ambari/blob/d1ba2298/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
index 5a8f135..506fb81 100644
--- a/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
+++ b/ambari-web/test/views/main/admin/stack_upgrade/upgrade_version_box_view_test.js
@@ -357,6 +357,8 @@ describe('App.UpgradeVersionBoxView', function () {
 
   describe("#showHosts()", function () {
     beforeEach(function () {
+      view.set('content.stackVersion', Em.Object.create({supportsRevert: false}));
+      view.set('content.stackServices', [Em.Object.create({isUpgradable: true})]);
       sinon.spy(App.ModalPopup, 'show');
       sinon.stub(view, 'filterHostsByStack', Em.K);
     });
@@ -681,6 +683,7 @@ describe('App.UpgradeVersionBoxView', function () {
       {
         inputData: {
           'content.status': 'INSTALLED',
+          'content.stackServices': [Em.Object.create({isUpgradable: true})],
           'controller.requestInProgress': true,
           'content.isPatch': true,
           'parentView.repoVersions': [
@@ -707,15 +710,16 @@ describe('App.UpgradeVersionBoxView', function () {
           isButtonGroup: true,
           buttons: [
             {
-              text: Em.I18n.t('admin.stackVersions.version.reinstall'),
-              action: 'installRepoVersionPopup',
-              isDisabled: true
+              "action": "installRepoVersionPopup",
+              "isDisabled": true,
+              "text": "Reinstall Packages",
             },
             {
-              text: Em.I18n.t('admin.stackVersions.version.preUpgradeCheck'),
-              action: 'showUpgradeOptions',
-              isDisabled: true
+              "action": "showUpgradeOptions",
+              "isDisabled": true,
+              "text": "Pre-Upgrade Check"
             },
+
             {
               "action": "confirmDiscardRepoVersion",
               "isDisabled": true,
@@ -751,18 +755,7 @@ describe('App.UpgradeVersionBoxView', function () {
         expected: {
           status: 'INSTALLED',
           isButtonGroup: true,
-          buttons: [
-            {
-              text: Em.I18n.t('admin.stackVersions.version.reinstall'),
-              action: 'installRepoVersionPopup',
-              isDisabled: true
-            },
-            {
-              text: Em.I18n.t('admin.stackVersions.version.preUpgradeCheck'),
-              action: 'showUpgradeOptions',
-              isDisabled: true
-            }
-          ],
+          buttons: [],
           isDisabled: true
         },
         title: 'installed version, later than current one, admin access, no requests in progress, another installation running'
@@ -1397,7 +1390,33 @@ describe('App.UpgradeVersionBoxView', function () {
       })));
     });
 
-    it('version higher than current and in INSTALLED state', function() {
+    it('version higher than current and in INSTALLED state, has no upgradable stack services and is not patch or maint', function() {
+      view.set('controller', Em.Object.create({
+        currentVersion: Em.Object.create({
+          repository_version: '2.0',
+          stack_name: 'HDP'
+        })
+      }));
+      view.set('content', Em.Object.create({
+        status: 'INSTALLED',
+        repositoryVersion: '2.1',
+        stackVersionType: 'HDP',
+        isPatch: false
+      }));
+      var element = Em.Object.create({
+        buttons: []
+      });
+      view.processPreUpgradeState(element);
+      expect(JSON.stringify(element)).to.be.equal(JSON.stringify(Em.Object.create({
+        "buttons": [],
+        "isButtonGroup": true,
+        'iconClass': 'icon-ok',
+        "text": Em.I18n.t('common.installed'),
+        "isDisabled": false
+      })));
+    });
+
+    it('version higher than current and in INSTALLED state, has no upgradable stack services and is patch', function() {
       view.set('controller', Em.Object.create({
         currentVersion: Em.Object.create({
           repository_version: '2.0',
@@ -1417,24 +1436,14 @@ describe('App.UpgradeVersionBoxView', function () {
       expect(JSON.stringify(element)).to.be.equal(JSON.stringify(Em.Object.create({
         "buttons": [
           {
-            "text": Em.I18n.t('admin.stackVersions.version.reinstall'),
-            "action": "installRepoVersionPopup",
-            "isDisabled": false
-          },
-          {
-            text: Em.I18n.t('admin.stackVersions.version.preUpgradeCheck'),
-            action: 'showUpgradeOptions',
-            isDisabled: false
-          },
-          {
-            "text": Em.I18n.t('common.hide'),
-            "action": "confirmDiscardRepoVersion",
-            "isDisabled": false
+           "text":Em.I18n.t('common.hide'),
+           "action":"confirmDiscardRepoVersion",
+           "isDisabled":false
           }
         ],
         "isButtonGroup": true,
-        "text": Em.I18n.t('admin.stackVersions.version.performUpgrade'),
-        "action": 'confirmUpgrade',
+        'iconClass': 'icon-ok',
+        "text": Em.I18n.t('common.installed'),
         "isDisabled": false
       })));
     });


[37/50] [abbrv] ambari git commit: Revert "AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)"

Posted by jl...@apache.org.
Revert "AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)"

This reverts commit b609fb43fe6d77adb7606436d9772414bf002b6b.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/32bf39e7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/32bf39e7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/32bf39e7

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 32bf39e7d276d825b5874e6a55a54bc69e24e294
Parents: b609fb4
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 16:27:13 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 16:27:13 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     |  8 --
 .../AmbariManagementControllerImpl.java         | 22 +++--
 .../internal/ServiceResourceProvider.java       |  8 ++
 .../internal/StackArtifactResourceProvider.java | 18 +++-
 .../server/orm/entities/WidgetLayoutEntity.java |  6 +-
 .../ambari/server/stack/StackDirectory.java     | 18 ++++
 .../apache/ambari/server/stack/StackModule.java |  5 ++
 .../apache/ambari/server/state/StackInfo.java   |  8 ++
 .../server/api/services/AmbariMetaInfoTest.java |  8 --
 .../AmbariManagementControllerImplTest.java     |  6 +-
 .../AmbariManagementControllerTest.java         |  6 --
 .../resources/stacks/OTHER/1.0/widgets.json     | 95 ++++++++++++++++++++
 12 files changed, 174 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 425d247..de84965 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,7 +21,6 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
-import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -126,7 +125,6 @@ public class AmbariMetaInfo {
   private File commonServicesRoot;
   private File extensionsRoot;
   private File serverVersionFile;
-  private File commonWidgetsDescriptorFile;
   private File customActionRoot;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
@@ -216,8 +214,6 @@ public class AmbariMetaInfo {
     serverVersionFile = new File(serverVersionFilePath);
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
-
-    commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
   /**
@@ -1439,8 +1435,4 @@ public class AmbariMetaInfo {
 
     return null;
   }
-
-  public File getCommonWidgetsDescriptorFile() {
-    return commonWidgetsDescriptorFile;
-  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 5642575..b2993e3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -5184,12 +5184,22 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         widgetDescriptorFiles.add(widgetDescriptorFile);
       }
     } else {
-      // common cluster level widgets
-      File commonWidgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
-      if (commonWidgetsFile != null && commonWidgetsFile.exists()) {
-        widgetDescriptorFiles.add(commonWidgetsFile);
-      } else {
-        LOG.warn("Common widgets file with path {%s} doesn't exist. No cluster widgets will be created.", commonWidgetsFile);
+      Set<StackId> stackIds = new HashSet<>();
+
+      for (Service svc : cluster.getServices().values()) {
+        stackIds.add(svc.getDesiredStackId());
+      }
+
+      for (StackId stackId : stackIds) {
+        StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
+
+        String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
+        if (widgetDescriptorFileLocation != null) {
+          File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
+          if (widgetDescriptorFile.exists()) {
+            widgetDescriptorFiles.add(widgetDescriptorFile);
+          }
+        }
       }
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index e65693b..76a4547 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -423,6 +423,8 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     // do all validation checks
     validateCreateRequests(requests, clusters);
 
+    Set<Cluster> clustersSetFromRequests = new HashSet<>();
+
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
@@ -478,6 +480,12 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
       // Initialize service widgets
       getManagementController().initializeWidgetsAndLayouts(cluster, s);
+      clustersSetFromRequests.add(cluster);
+    }
+
+    // Create cluster widgets and layouts
+    for (Cluster cluster : clustersSetFromRequests) {
+      getManagementController().initializeWidgetsAndLayouts(cluster, null);
     }
   }
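
A note on the restored block: several ServiceRequests in a single create call can name the same cluster, so clusters are collected into a Set first and the cluster-scoped initialization runs once per distinct cluster. A minimal sketch of the idiom, reusing the signatures visible in the diff (an illustration of the control flow, not the full method):

    // Sketch: create cluster-level widgets once per distinct cluster, even
    // when many service requests in the batch target the same cluster.
    Set<Cluster> distinctClusters = new HashSet<>();
    for (ServiceRequest request : requests) {
      Cluster cluster = clusters.getCluster(request.getClusterName());
      // ... per-service creation and service-widget initialization ...
      distinctClusters.add(cluster);  // HashSet silently drops duplicates
    }
    for (Cluster cluster : distinctClusters) {
      // a null service argument requests cluster-scoped widgets and layouts
      getManagementController().initializeWidgetsAndLayouts(cluster, null);
    }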
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index a7f7710..2e8a32a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -426,7 +426,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     }
 
     if (StringUtils.isEmpty(serviceName)) {
-      return null;
+      return getWidgetsDescriptorForCluster(stackInfo);
     } else {
       return getWidgetsDescriptorForService(stackInfo, serviceName);
     }
@@ -450,6 +450,22 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     return widgetDescriptor;
   }
 
+  public Map<String, Object> getWidgetsDescriptorForCluster(StackInfo stackInfo)
+      throws NoSuchParentResourceException, IOException {
+
+    Map<String, Object> widgetDescriptor = null;
+
+    String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
+    if (widgetDescriptorFileLocation != null) {
+      File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
+      if (widgetDescriptorFile.exists()) {
+        widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
+      }
+    }
+
+    return widgetDescriptor;
+  }
+
   /**
    * Get a kerberos descriptor.
    *
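
The restored getWidgetsDescriptorForCluster() parses the descriptor with Gson straight from a new FileReader, which is never closed. A self-contained sketch of the same parse with try-with-resources; the class and method names are illustrative, while gson and the Map<String, Object> target type mirror the provider's fields:

    import java.io.File;
    import java.io.FileReader;
    import java.io.IOException;
    import java.lang.reflect.Type;
    import java.util.Map;

    import com.google.gson.Gson;
    import com.google.gson.reflect.TypeToken;

    class WidgetsDescriptorReader {
      private static final Gson GSON = new Gson();
      private static final Type WIDGET_LAYOUT_TYPE =
          new TypeToken<Map<String, Object>>() {}.getType();

      // Sketch: parse a widgets descriptor JSON file into a Map, closing
      // the reader when done; returns null when the file is absent.
      static Map<String, Object> read(String location) throws IOException {
        File file = new File(location);
        if (!file.exists()) {
          return null;
        }
        try (FileReader reader = new FileReader(file)) {
          return GSON.fromJson(reader, WIDGET_LAYOUT_TYPE);
        }
      }
    }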

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
index 1fa45e9..90d98fc 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
@@ -33,7 +33,6 @@ import javax.persistence.OneToMany;
 import javax.persistence.OrderBy;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
-import javax.persistence.UniqueConstraint;
 
 @Entity
 @Table(name = "widget_layout")
@@ -42,8 +41,7 @@ import javax.persistence.UniqueConstraint;
         pkColumnName = "sequence_name",
         valueColumnName = "sequence_value",
         pkColumnValue = "widget_layout_id_seq",
-        initialValue = 0,
-        uniqueConstraints=@UniqueConstraint(columnNames={"layout_name", "cluster_id"})
+        initialValue = 0
 )
 @NamedQueries({
     @NamedQuery(name = "WidgetLayoutEntity.findAll", query = "SELECT widgetLayout FROM WidgetLayoutEntity widgetLayout"),
@@ -58,7 +56,7 @@ public class WidgetLayoutEntity {
   @Column(name = "id", nullable = false, updatable = false)
   private Long id;
 
-  @Column(name = "layout_name", nullable = false, length = 255)
+  @Column(name = "layout_name", nullable = false, unique = true, length = 255)
   private String layoutName;
 
   @Column(name = "section_name", nullable = false, length = 255)

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index e3c586b..9259466 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -94,6 +94,11 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String kerberosDescriptorPreconfigureFilePath;
 
   /**
+   * widgets descriptor file path
+   */
+  private String widgetsDescriptorFilePath;
+
+  /**
    * repository file
    */
   private RepositoryXml repoFile;
@@ -228,6 +233,15 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
+   * Obtain the path to the (stack-level) widgets descriptor file
+   *
+   * @return the path to the (stack-level) widgets descriptor file
+   */
+  public String getWidgetsDescriptorFilePath() {
+    return widgetsDescriptorFilePath;
+  }
+
+  /**
    * Obtain the repository directory path.
    *
    * @return repository directory path
@@ -310,6 +324,10 @@ public class StackDirectory extends StackDefinitionDirectory {
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;
     }
 
+    if (subDirs.contains(WIDGETS_DESCRIPTOR_FILE_NAME)) {
+      widgetsDescriptorFilePath = getAbsolutePath() + File.separator + WIDGETS_DESCRIPTOR_FILE_NAME;
+    }
+
     parseUpgradePacks(subDirs);
     parseServiceDirectories(subDirs);
     parseRepoFile(subDirs);

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 71235f3..742706d 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -294,6 +294,10 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
     }
 
+    if (stackInfo.getWidgetsDescriptorFileLocation() == null) {
+      stackInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
+    }
+
     mergeServicesWithParent(parentStack, allStacks, commonServices, extensions);
   }
 
@@ -569,6 +573,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
+      stackInfo.setWidgetsDescriptorFileLocation(stackDirectory.getWidgetsDescriptorFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());
       stackInfo.setConfigUpgradePack(stackDirectory.getConfigUpgradePack());
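
Both hunks above follow the stack-inheritance pattern already used for the RCO and Kerberos descriptor files: a child stack keeps its own widgets descriptor when it defines one, otherwise it inherits the parent's. A minimal sketch of that null-coalescing merge; the paths are hypothetical examples, not real stack contents:

    // Generic form of the merge idiom: prefer the child's descriptor
    // location, fall back to the parent's when the child has none.
    static String inheritLocation(String childLocation, String parentLocation) {
      return (childLocation != null) ? childLocation : parentLocation;
    }

    // inheritLocation(null, "/stacks/HDP/2.0.6/widgets.json")
    //   -> "/stacks/HDP/2.0.6/widgets.json"  (child defines none, inherits)
    // inheritLocation("/stacks/HDP/3.0/widgets.json", "/stacks/HDP/2.0.6/widgets.json")
    //   -> "/stacks/HDP/3.0/widgets.json"    (child overrides the parent)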

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index 3efc997..dcf850f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -429,6 +429,14 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.kerberosDescriptorPreConfigurationFileLocation = kerberosDescriptorPreConfigurationFileLocation;
   }
 
+  public String getWidgetsDescriptorFileLocation() {
+    return widgetsDescriptorFileLocation;
+  }
+
+  public void setWidgetsDescriptorFileLocation(String widgetsDescriptorFileLocation) {
+    this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
+  }
+
   /**
    * Set the path of the stack upgrade directory.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 25e8d04..4baca5c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1894,14 +1894,6 @@ public class AmbariMetaInfoTest {
     Assert.assertTrue(descriptor.getService("NEW_SERVICE").shouldPreconfigure());
   }
 
-  @Test
-  public void testGetCommonWidgetsFile() throws AmbariException {
-    File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
-
-    Assert.assertNotNull(widgetsFile);
-    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
-  }
-
   private File getStackRootTmp(String buildDir) {
     return new File(buildDir + "/ambari-metaInfo");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 9547271..a02690f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2367,14 +2367,18 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
     expect(service.getDesiredStackId()).andReturn(stackId).atLeastOnce();
+    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
+        .put("HDFS", service)
+        .build());
 
     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
 
 
     StackInfo stackInfo = createNiceMock(StackInfo.class);
+    expect(stackInfo.getWidgetsDescriptorFileLocation()).andReturn(null).once();
 
     expect(ambariMetaInfo.getStack("HDP", "2.1")).andReturn(stackInfo).atLeastOnce();
-    expect(ambariMetaInfo.getCommonWidgetsDescriptorFile()).andReturn(null).once();
+    expect(ambariMetaInfo.getStack(stackId)).andReturn(stackInfo).atLeastOnce();
 
     replay(injector, clusters, ambariMetaInfo, stackInfo, cluster, service, repoVersionDAO, repoVersion);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index 7094caa..b370829 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -34,7 +34,6 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.StringReader;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
@@ -10425,11 +10424,6 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("UPDATED_BLOCKED_TIME", layoutUserWidgetEntities.get(3).getWidget().getWidgetName());
     Assert.assertEquals("HBASE_SUMMARY", layoutUserWidgetEntities.get(0).getWidget().getDefaultSectionName());
 
-    File widgetsFile  = ambariMetaInfo.getCommonWidgetsDescriptorFile();
-    assertNotNull(widgetsFile);
-    assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
-    assertTrue(widgetsFile.exists());
-
     candidateLayoutEntity = null;
     for (WidgetLayoutEntity entity : layoutEntities) {
       if (entity.getLayoutName().equals("default_system_heatmap")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/32bf39e7/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host CPU Wait IO %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}
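
Each widget value above is a template expression over the declared metrics. Taking the disk widget as a worked example with hypothetical readings disk_total = 500 and disk_free = 125 (in any consistent unit), ((500 - 125) / 500) * 100 = 75, i.e. 75% used, which stays under the declared max_limit of 100. A minimal sketch of the arithmetic behind the expression, assuming plain double inputs rather than Ambari's expression engine:

    // Sketch: the arithmetic behind "${((disk_total-disk_free)/disk_total)*100}".
    // Inputs are hypothetical metric readings, not values queried from Ambari.
    static double diskUsedPercent(double diskTotal, double diskFree) {
      if (diskTotal <= 0) {
        return 0.0;  // guard against division by zero on missing metrics
      }
      return ((diskTotal - diskFree) / diskTotal) * 100.0;
    }

    // diskUsedPercent(500.0, 125.0) == 75.0, matching the "%" display_unit.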


[42/50] [abbrv] ambari git commit: AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)

Posted by jl...@apache.org.
AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/388cb418
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/388cb418
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/388cb418

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: 388cb4180b3ea08e25d95aaf67bac68e8283a518
Parents: f44c866
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 17:07:24 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 17:07:41 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     |  8 ++
 .../AmbariManagementControllerImpl.java         | 22 ++---
 .../internal/ServiceResourceProvider.java       |  8 --
 .../internal/StackArtifactResourceProvider.java | 18 +---
 .../server/orm/entities/WidgetLayoutEntity.java |  6 +-
 .../ambari/server/stack/StackDirectory.java     | 18 ----
 .../apache/ambari/server/stack/StackModule.java |  5 --
 .../apache/ambari/server/state/StackInfo.java   |  8 --
 .../resources/stacks/HDP/2.0.6/widgets.json     | 95 --------------------
 .../main/resources/stacks/HDP/3.0/widgets.json  | 95 --------------------
 ambari-server/src/main/resources/widgets.json   | 95 ++++++++++++++++++++
 .../server/api/services/AmbariMetaInfoTest.java |  8 ++
 .../AmbariManagementControllerImplTest.java     |  6 +-
 .../AmbariManagementControllerTest.java         |  6 ++
 .../resources/stacks/OTHER/1.0/widgets.json     | 95 --------------------
 ambari-server/src/test/resources/widgets.json   | 95 ++++++++++++++++++++
 16 files changed, 224 insertions(+), 364 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index de84965..425d247 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
+import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -125,6 +126,7 @@ public class AmbariMetaInfo {
   private File commonServicesRoot;
   private File extensionsRoot;
   private File serverVersionFile;
+  private File commonWidgetsDescriptorFile;
   private File customActionRoot;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
@@ -214,6 +216,8 @@ public class AmbariMetaInfo {
     serverVersionFile = new File(serverVersionFilePath);
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
+
+    commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
   /**
@@ -1435,4 +1439,8 @@ public class AmbariMetaInfo {
 
     return null;
   }
+
+  public File getCommonWidgetsDescriptorFile() {
+    return commonWidgetsDescriptorFile;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index b2993e3..5642575 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -5184,22 +5184,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         widgetDescriptorFiles.add(widgetDescriptorFile);
       }
     } else {
-      Set<StackId> stackIds = new HashSet<>();
-
-      for (Service svc : cluster.getServices().values()) {
-        stackIds.add(svc.getDesiredStackId());
-      }
-
-      for (StackId stackId : stackIds) {
-        StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
-
-        String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
-        if (widgetDescriptorFileLocation != null) {
-          File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
-          if (widgetDescriptorFile.exists()) {
-            widgetDescriptorFiles.add(widgetDescriptorFile);
-          }
-        }
+      // common cluster level widgets
+      File commonWidgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
+      if (commonWidgetsFile != null && commonWidgetsFile.exists()) {
+        widgetDescriptorFiles.add(commonWidgetsFile);
+      } else {
+        LOG.warn("Common widgets file with path {} doesn't exist. No cluster widgets will be created.", commonWidgetsFile);
       }
     }
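
With a single resources/widgets.json, the cluster-level branch reduces to one lookup plus a warning when the file is missing. A minimal sketch of the fallback, assuming the surrounding LOG, ambariMetaInfo, and widgetDescriptorFiles fields from the diff; note that SLF4J's parameterized logging substitutes {} placeholders (printf-style %s conversions are not interpreted):

    // Sketch: cluster-level widgets come from one common descriptor file.
    File commonWidgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
    if (commonWidgetsFile != null && commonWidgetsFile.exists()) {
      widgetDescriptorFiles.add(commonWidgetsFile);
    } else {
      // {} is replaced with the argument's toString() by SLF4J
      LOG.warn("Common widgets file with path {} doesn't exist. No cluster widgets will be created.",
          commonWidgetsFile);
    }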
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 76a4547..e65693b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -423,8 +423,6 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     // do all validation checks
     validateCreateRequests(requests, clusters);
 
-    Set<Cluster> clustersSetFromRequests = new HashSet<>();
-
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
@@ -480,12 +478,6 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
       // Initialize service widgets
       getManagementController().initializeWidgetsAndLayouts(cluster, s);
-      clustersSetFromRequests.add(cluster);
-    }
-
-    // Create cluster widgets and layouts
-    for (Cluster cluster : clustersSetFromRequests) {
-      getManagementController().initializeWidgetsAndLayouts(cluster, null);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index 2e8a32a..a7f7710 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -426,7 +426,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     }
 
     if (StringUtils.isEmpty(serviceName)) {
-      return getWidgetsDescriptorForCluster(stackInfo);
+      return null;
     } else {
       return getWidgetsDescriptorForService(stackInfo, serviceName);
     }
@@ -450,22 +450,6 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     return widgetDescriptor;
   }
 
-  public Map<String, Object> getWidgetsDescriptorForCluster(StackInfo stackInfo)
-      throws NoSuchParentResourceException, IOException {
-
-    Map<String, Object> widgetDescriptor = null;
-
-    String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
-    if (widgetDescriptorFileLocation != null) {
-      File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
-      if (widgetDescriptorFile.exists()) {
-        widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
-      }
-    }
-
-    return widgetDescriptor;
-  }
-
   /**
    * Get a kerberos descriptor.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
index 90d98fc..1fa45e9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
@@ -33,6 +33,7 @@ import javax.persistence.OneToMany;
 import javax.persistence.OrderBy;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
+import javax.persistence.UniqueConstraint;
 
 @Entity
 @Table(name = "widget_layout")
@@ -41,7 +42,8 @@ import javax.persistence.TableGenerator;
         pkColumnName = "sequence_name",
         valueColumnName = "sequence_value",
         pkColumnValue = "widget_layout_id_seq",
-        initialValue = 0
+        initialValue = 0,
+        uniqueConstraints=@UniqueConstraint(columnNames={"layout_name", "cluster_id"})
 )
 @NamedQueries({
     @NamedQuery(name = "WidgetLayoutEntity.findAll", query = "SELECT widgetLayout FROM WidgetLayoutEntity widgetLayout"),
@@ -56,7 +58,7 @@ public class WidgetLayoutEntity {
   @Column(name = "id", nullable = false, updatable = false)
   private Long id;
 
-  @Column(name = "layout_name", nullable = false, unique = true, length = 255)
+  @Column(name = "layout_name", nullable = false, length = 255)
   private String layoutName;
 
   @Column(name = "section_name", nullable = false, length = 255)

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index 9259466..e3c586b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -94,11 +94,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String kerberosDescriptorPreconfigureFilePath;
 
   /**
-   * widgets descriptor file path
-   */
-  private String widgetsDescriptorFilePath;
-
-  /**
    * repository file
    */
   private RepositoryXml repoFile;
@@ -233,15 +228,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
-   * Obtain the path to the (stack-level) widgets descriptor file
-   *
-   * @return the path to the (stack-level) widgets descriptor file
-   */
-  public String getWidgetsDescriptorFilePath() {
-    return widgetsDescriptorFilePath;
-  }
-
-  /**
    * Obtain the repository directory path.
    *
    * @return repository directory path
@@ -324,10 +310,6 @@ public class StackDirectory extends StackDefinitionDirectory {
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;
     }
 
-    if (subDirs.contains(WIDGETS_DESCRIPTOR_FILE_NAME)) {
-      widgetsDescriptorFilePath = getAbsolutePath() + File.separator + WIDGETS_DESCRIPTOR_FILE_NAME;
-    }
-
     parseUpgradePacks(subDirs);
     parseServiceDirectories(subDirs);
     parseRepoFile(subDirs);

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 742706d..71235f3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -294,10 +294,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
     }
 
-    if (stackInfo.getWidgetsDescriptorFileLocation() == null) {
-      stackInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
-    }
-
     mergeServicesWithParent(parentStack, allStacks, commonServices, extensions);
   }
 
@@ -573,7 +569,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
-      stackInfo.setWidgetsDescriptorFileLocation(stackDirectory.getWidgetsDescriptorFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());
       stackInfo.setConfigUpgradePack(stackDirectory.getConfigUpgradePack());

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index dcf850f..3efc997 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -429,14 +429,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.kerberosDescriptorPreConfigurationFileLocation = kerberosDescriptorPreConfigurationFileLocation;
   }
 
-  public String getWidgetsDescriptorFileLocation() {
-    return widgetsDescriptorFileLocation;
-  }
-
-  public void setWidgetsDescriptorFileLocation(String widgetsDescriptorFileLocation) {
-    this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
-  }
-
   /**
    * Set the path of the stack upgrade directory.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json b/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json b/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/3.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/main/resources/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/widgets.json b/ambari-server/src/main/resources/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/main/resources/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host CPU Wait IO %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 4baca5c..25e8d04 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1894,6 +1894,14 @@ public class AmbariMetaInfoTest {
     Assert.assertTrue(descriptor.getService("NEW_SERVICE").shouldPreconfigure());
   }
 
+  @Test
+  public void testGetCommonWidgetsFile() throws AmbariException {
+    File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
+
+    Assert.assertNotNull(widgetsFile);
+    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
+  }
+
   private File getStackRootTmp(String buildDir) {
     return new File(buildDir + "/ambari-metaInfo");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index a02690f..9547271 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2367,18 +2367,14 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
     expect(service.getDesiredStackId()).andReturn(stackId).atLeastOnce();
-    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HDFS", service)
-        .build());
 
     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
 
 
     StackInfo stackInfo = createNiceMock(StackInfo.class);
-    expect(stackInfo.getWidgetsDescriptorFileLocation()).andReturn(null).once();
 
     expect(ambariMetaInfo.getStack("HDP", "2.1")).andReturn(stackInfo).atLeastOnce();
-    expect(ambariMetaInfo.getStack(stackId)).andReturn(stackInfo).atLeastOnce();
+    expect(ambariMetaInfo.getCommonWidgetsDescriptorFile()).andReturn(null).once();
 
     replay(injector, clusters, ambariMetaInfo, stackInfo, cluster, service, repoVersionDAO, repoVersion);
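
The adjusted expectations drive the warn branch by letting the common descriptor lookup return null exactly once. A self-contained EasyMock sketch of that expect/replay/verify cycle; the WidgetsSource interface is a stand-in for illustration, not Ambari's real API:

    import static org.easymock.EasyMock.createNiceMock;
    import static org.easymock.EasyMock.expect;
    import static org.easymock.EasyMock.replay;
    import static org.easymock.EasyMock.verify;

    import java.io.File;

    // Stand-in for the single method under test; not Ambari's interface.
    interface WidgetsSource {
      File getCommonWidgetsDescriptorFile();
    }

    public class WidgetsSourceMockSketch {
      public static void main(String[] args) {
        WidgetsSource metaInfo = createNiceMock(WidgetsSource.class);
        // Returning null steers the caller into its "no widgets" branch.
        expect(metaInfo.getCommonWidgetsDescriptorFile()).andReturn(null).once();
        replay(metaInfo);

        // Exercise the expectation, then verify the recorded call count.
        assert metaInfo.getCommonWidgetsDescriptorFile() == null;
        verify(metaInfo);
      }
    }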
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index b370829..7094caa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -34,6 +34,7 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.StringReader;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
@@ -10424,6 +10425,11 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("UPDATED_BLOCKED_TIME", layoutUserWidgetEntities.get(3).getWidget().getWidgetName());
     Assert.assertEquals("HBASE_SUMMARY", layoutUserWidgetEntities.get(0).getWidget().getDefaultSectionName());
 
+    File widgetsFile  = ambariMetaInfo.getCommonWidgetsDescriptorFile();
+    assertNotNull(widgetsFile);
+    assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
+    assertTrue(widgetsFile.exists());
+
     candidateLayoutEntity = null;
     for (WidgetLayoutEntity entity : layoutEntities) {
       if (entity.getLayoutName().equals("default_system_heatmap")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/388cb418/ambari-server/src/test/resources/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/widgets.json b/ambari-server/src/test/resources/widgets.json
new file mode 100644
index 0000000..3176354
--- /dev/null
+++ b/ambari-server/src/test/resources/widgets.json
@@ -0,0 +1,95 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_system_heatmap",
+      "display_name": "Heatmaps",
+      "section_name": "SYSTEM_HEATMAPS",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "Host Disk Space Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": true,
+          "metrics": [
+            {
+              "name": "disk_free",
+              "metric_path": "metrics/disk/disk_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "disk_total",
+              "metric_path": "metrics/disk/disk_total",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Disk Space Used %",
+              "value": "${((disk_total-disk_free)/disk_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host Memory Used %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "mem_total",
+              "metric_path": "metrics/memory/mem_total",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_free",
+              "metric_path": "metrics/memory/mem_free",
+              "service_name": "STACK"
+            },
+            {
+              "name": "mem_cached",
+              "metric_path": "metrics/memory/mem_cached",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        },
+        {
+          "widget_name": "Host CPU Wait IO %",
+          "description": "",
+          "widget_type": "HEATMAP",
+          "is_visible": false,
+          "metrics": [
+            {
+              "name": "cpu_wio",
+              "metric_path": "metrics/cpu/cpu_wio",
+              "service_name": "STACK"
+            }
+          ],
+          "values": [
+            {
+              "name": "Host Memory Used %",
+              "value": "${cpu_wio*100}"
+            }
+          ],
+          "properties": {
+            "display_unit": "%",
+            "max_limit": "100"
+          }
+        }
+      ]
+    }
+  ]
+}


[36/50] [abbrv] ambari git commit: AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)

Posted by jl...@apache.org.
AMBARI-22131 Move resources/stacks/HDP/3.0/widgets.json to resources/widgets.json (dsen)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b609fb43
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b609fb43
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b609fb43

Branch: refs/heads/branch-feature-AMBARI-14714
Commit: b609fb43fe6d77adb7606436d9772414bf002b6b
Parents: 3f00252
Author: Dmytro Sen <ds...@apache.org>
Authored: Fri Oct 6 13:57:14 2017 +0300
Committer: Dmytro Sen <ds...@apache.org>
Committed: Fri Oct 6 13:57:14 2017 +0300

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     |  8 ++
 .../AmbariManagementControllerImpl.java         | 22 ++---
 .../internal/ServiceResourceProvider.java       |  8 --
 .../internal/StackArtifactResourceProvider.java | 18 +---
 .../server/orm/entities/WidgetLayoutEntity.java |  6 +-
 .../ambari/server/stack/StackDirectory.java     | 18 ----
 .../apache/ambari/server/stack/StackModule.java |  5 --
 .../apache/ambari/server/state/StackInfo.java   |  8 --
 .../server/api/services/AmbariMetaInfoTest.java |  8 ++
 .../AmbariManagementControllerImplTest.java     |  6 +-
 .../AmbariManagementControllerTest.java         |  6 ++
 .../resources/stacks/OTHER/1.0/widgets.json     | 95 --------------------
 12 files changed, 34 insertions(+), 174 deletions(-)
----------------------------------------------------------------------
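
With this change, cluster-level (non-service) widgets are read from a single widgets.json under the server resources directory instead of per-stack descriptor files. A minimal standalone sketch of the new lookup, assuming a resources path like /var/lib/ambari-server/resources (the helper class and main() below are hypothetical illustrations, not code from this commit):

import java.io.File;

public class CommonWidgetsLookupSketch {
  // Matches StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME referenced in the diff below.
  static final String WIDGETS_DESCRIPTOR_FILE_NAME = "widgets.json";

  // Mirrors the AmbariMetaInfo change: the common descriptor lives directly
  // under the resources directory, not under any stack directory.
  static File commonWidgetsDescriptorFile(String resourceDirPath) {
    return new File(resourceDirPath, WIDGETS_DESCRIPTOR_FILE_NAME);
  }

  public static void main(String[] args) {
    // Hypothetical path; the real value comes from server configuration.
    File f = commonWidgetsDescriptorFile("/var/lib/ambari-server/resources");
    System.out.println(f.getPath() + (f.exists() ? " (present)" : " (missing)"));
  }
}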


http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index de84965..425d247 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -21,6 +21,7 @@ package org.apache.ambari.server.api.services;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.Component;
 import static org.apache.ambari.server.controller.spi.Resource.InternalType.HostComponent;
 import static org.apache.ambari.server.controller.utilities.PropertyHelper.AGGREGATE_FUNCTION_IDENTIFIERS;
+import static org.apache.ambari.server.stack.StackDirectory.WIDGETS_DESCRIPTOR_FILE_NAME;
 
 import java.io.File;
 import java.io.FileReader;
@@ -125,6 +126,7 @@ public class AmbariMetaInfo {
   private File commonServicesRoot;
   private File extensionsRoot;
   private File serverVersionFile;
+  private File commonWidgetsDescriptorFile;
   private File customActionRoot;
   private Map<String, VersionDefinitionXml> versionDefinitions = null;
 
@@ -214,6 +216,8 @@ public class AmbariMetaInfo {
     serverVersionFile = new File(serverVersionFilePath);
 
     customActionRoot = new File(conf.getCustomActionDefinitionPath());
+
+    commonWidgetsDescriptorFile = new File(conf.getResourceDirPath(), WIDGETS_DESCRIPTOR_FILE_NAME);
   }
 
   /**
@@ -1435,4 +1439,8 @@ public class AmbariMetaInfo {
 
     return null;
   }
+
+  public File getCommonWidgetsDescriptorFile() {
+    return commonWidgetsDescriptorFile;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index b2993e3..5642575 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -5184,22 +5184,12 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
         widgetDescriptorFiles.add(widgetDescriptorFile);
       }
     } else {
-      Set<StackId> stackIds = new HashSet<>();
-
-      for (Service svc : cluster.getServices().values()) {
-        stackIds.add(svc.getDesiredStackId());
-      }
-
-      for (StackId stackId : stackIds) {
-        StackInfo stackInfo = ambariMetaInfo.getStack(stackId);
-
-        String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
-        if (widgetDescriptorFileLocation != null) {
-          File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
-          if (widgetDescriptorFile.exists()) {
-            widgetDescriptorFiles.add(widgetDescriptorFile);
-          }
-        }
+      // common cluster level widgets
+      File commonWidgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
+      if (commonWidgetsFile != null && commonWidgetsFile.exists()) {
+        widgetDescriptorFiles.add(commonWidgetsFile);
+      } else {
+        LOG.warn("Common widgets file with path {%s} doesn't exist. No cluster widgets will be created.", commonWidgetsFile);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
index 76a4547..e65693b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ServiceResourceProvider.java
@@ -423,8 +423,6 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
     // do all validation checks
     validateCreateRequests(requests, clusters);
 
-    Set<Cluster> clustersSetFromRequests = new HashSet<>();
-
     for (ServiceRequest request : requests) {
       Cluster cluster = clusters.getCluster(request.getClusterName());
 
@@ -480,12 +478,6 @@ public class ServiceResourceProvider extends AbstractControllerResourceProvider
 
       // Initialize service widgets
       getManagementController().initializeWidgetsAndLayouts(cluster, s);
-      clustersSetFromRequests.add(cluster);
-    }
-
-    // Create cluster widgets and layouts
-    for (Cluster cluster : clustersSetFromRequests) {
-      getManagementController().initializeWidgetsAndLayouts(cluster, null);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
index 2e8a32a..a7f7710 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/StackArtifactResourceProvider.java
@@ -426,7 +426,7 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     }
 
     if (StringUtils.isEmpty(serviceName)) {
-      return getWidgetsDescriptorForCluster(stackInfo);
+      return null;
     } else {
       return getWidgetsDescriptorForService(stackInfo, serviceName);
     }
@@ -450,22 +450,6 @@ public class StackArtifactResourceProvider extends AbstractControllerResourcePro
     return widgetDescriptor;
   }
 
-  public Map<String, Object> getWidgetsDescriptorForCluster(StackInfo stackInfo)
-      throws NoSuchParentResourceException, IOException {
-
-    Map<String, Object> widgetDescriptor = null;
-
-    String widgetDescriptorFileLocation = stackInfo.getWidgetsDescriptorFileLocation();
-    if (widgetDescriptorFileLocation != null) {
-      File widgetDescriptorFile = new File(widgetDescriptorFileLocation);
-      if (widgetDescriptorFile.exists()) {
-        widgetDescriptor = gson.fromJson(new FileReader(widgetDescriptorFile), widgetLayoutType);
-      }
-    }
-
-    return widgetDescriptor;
-  }
-
   /**
    * Get a kerberos descriptor.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
index 90d98fc..1fa45e9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/WidgetLayoutEntity.java
@@ -33,6 +33,7 @@ import javax.persistence.OneToMany;
 import javax.persistence.OrderBy;
 import javax.persistence.Table;
 import javax.persistence.TableGenerator;
+import javax.persistence.UniqueConstraint;
 
 @Entity
 @Table(name = "widget_layout")
@@ -41,7 +42,8 @@ import javax.persistence.TableGenerator;
         pkColumnName = "sequence_name",
         valueColumnName = "sequence_value",
         pkColumnValue = "widget_layout_id_seq",
-        initialValue = 0
+        initialValue = 0,
+        uniqueConstraints = @UniqueConstraint(columnNames = {"layout_name", "cluster_id"})
 )
 @NamedQueries({
     @NamedQuery(name = "WidgetLayoutEntity.findAll", query = "SELECT widgetLayout FROM WidgetLayoutEntity widgetLayout"),
@@ -56,7 +58,7 @@ public class WidgetLayoutEntity {
   @Column(name = "id", nullable = false, updatable = false)
   private Long id;
 
-  @Column(name = "layout_name", nullable = false, unique = true, length = 255)
+  @Column(name = "layout_name", nullable = false, length = 255)
   private String layoutName;
 
   @Column(name = "section_name", nullable = false, length = 255)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
index 9259466..e3c586b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
@@ -94,11 +94,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   private String kerberosDescriptorPreconfigureFilePath;
 
   /**
-   * widgets descriptor file path
-   */
-  private String widgetsDescriptorFilePath;
-
-  /**
    * repository file
    */
   private RepositoryXml repoFile;
@@ -233,15 +228,6 @@ public class StackDirectory extends StackDefinitionDirectory {
   }
 
   /**
-   * Obtain the path to the (stack-level) widgets descriptor file
-   *
-   * @return the path to the (stack-level) widgets descriptor file
-   */
-  public String getWidgetsDescriptorFilePath() {
-    return widgetsDescriptorFilePath;
-  }
-
-  /**
    * Obtain the repository directory path.
    *
    * @return repository directory path
@@ -324,10 +310,6 @@ public class StackDirectory extends StackDefinitionDirectory {
       kerberosDescriptorPreconfigureFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_PRECONFIGURE_FILE_NAME;
     }
 
-    if (subDirs.contains(WIDGETS_DESCRIPTOR_FILE_NAME)) {
-      widgetsDescriptorFilePath = getAbsolutePath() + File.separator + WIDGETS_DESCRIPTOR_FILE_NAME;
-    }
-
     parseUpgradePacks(subDirs);
     parseServiceDirectories(subDirs);
     parseRepoFile(subDirs);

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 742706d..71235f3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -294,10 +294,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(parentStack.getModuleInfo().getKerberosDescriptorPreConfigurationFileLocation());
     }
 
-    if (stackInfo.getWidgetsDescriptorFileLocation() == null) {
-      stackInfo.setWidgetsDescriptorFileLocation(parentStack.getModuleInfo().getWidgetsDescriptorFileLocation());
-    }
-
     mergeServicesWithParent(parentStack, allStacks, commonServices, extensions);
   }
 
@@ -573,7 +569,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());
-      stackInfo.setWidgetsDescriptorFileLocation(stackDirectory.getWidgetsDescriptorFilePath());
       stackInfo.setUpgradesFolder(stackDirectory.getUpgradesDir());
       stackInfo.setUpgradePacks(stackDirectory.getUpgradePacks());
       stackInfo.setConfigUpgradePack(stackDirectory.getConfigUpgradePack());

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index dcf850f..3efc997 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -429,14 +429,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.kerberosDescriptorPreConfigurationFileLocation = kerberosDescriptorPreConfigurationFileLocation;
   }
 
-  public String getWidgetsDescriptorFileLocation() {
-    return widgetsDescriptorFileLocation;
-  }
-
-  public void setWidgetsDescriptorFileLocation(String widgetsDescriptorFileLocation) {
-    this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
-  }
-
   /**
    * Set the path of the stack upgrade directory.
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
index 4baca5c..25e8d04 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
@@ -1894,6 +1894,14 @@ public class AmbariMetaInfoTest {
     Assert.assertTrue(descriptor.getService("NEW_SERVICE").shouldPreconfigure());
   }
 
+  @Test
+  public void testGetCommonWidgetsFile() throws AmbariException {
+    File widgetsFile = metaInfo.getCommonWidgetsDescriptorFile();
+
+    Assert.assertNotNull(widgetsFile);
+    Assert.assertEquals("/var/lib/ambari-server/resources/widgets.json", widgetsFile.getPath());
+  }
+
   private File getStackRootTmp(String buildDir) {
     return new File(buildDir + "/ambari-metaInfo");
   }

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index a02690f..9547271 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2367,18 +2367,14 @@ public class AmbariManagementControllerImplTest {
     Cluster cluster = createNiceMock(Cluster.class);
     Service service = createNiceMock(Service.class);
     expect(service.getDesiredStackId()).andReturn(stackId).atLeastOnce();
-    expect(cluster.getServices()).andReturn(ImmutableMap.<String, Service>builder()
-        .put("HDFS", service)
-        .build());
 
     expect(clusters.getCluster("c1")).andReturn(cluster).atLeastOnce();
 
 
     StackInfo stackInfo = createNiceMock(StackInfo.class);
-    expect(stackInfo.getWidgetsDescriptorFileLocation()).andReturn(null).once();
 
     expect(ambariMetaInfo.getStack("HDP", "2.1")).andReturn(stackInfo).atLeastOnce();
-    expect(ambariMetaInfo.getStack(stackId)).andReturn(stackInfo).atLeastOnce();
+    expect(ambariMetaInfo.getCommonWidgetsDescriptorFile()).andReturn(null).once();
 
     replay(injector, clusters, ambariMetaInfo, stackInfo, cluster, service, repoVersionDAO, repoVersion);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index b370829..7094caa 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -34,6 +34,7 @@ import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.StringReader;
 import java.lang.reflect.Type;
 import java.text.MessageFormat;
@@ -10424,6 +10425,11 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals("UPDATED_BLOCKED_TIME", layoutUserWidgetEntities.get(3).getWidget().getWidgetName());
     Assert.assertEquals("HBASE_SUMMARY", layoutUserWidgetEntities.get(0).getWidget().getDefaultSectionName());
 
+    File widgetsFile = ambariMetaInfo.getCommonWidgetsDescriptorFile();
+    assertNotNull(widgetsFile);
+    assertEquals("src/test/resources/widgets.json", widgetsFile.getPath());
+    assertTrue(widgetsFile.exists());
+
     candidateLayoutEntity = null;
     for (WidgetLayoutEntity entity : layoutEntities) {
       if (entity.getLayoutName().equals("default_system_heatmap")) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/b609fb43/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json b/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
deleted file mode 100644
index 3176354..0000000
--- a/ambari-server/src/test/resources/stacks/OTHER/1.0/widgets.json
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "layouts": [
-    {
-      "layout_name": "default_system_heatmap",
-      "display_name": "Heatmaps",
-      "section_name": "SYSTEM_HEATMAPS",
-      "widgetLayoutInfo": [
-        {
-          "widget_name": "Host Disk Space Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": true,
-          "metrics": [
-            {
-              "name": "disk_free",
-              "metric_path": "metrics/disk/disk_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "disk_total",
-              "metric_path": "metrics/disk/disk_total",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Disk Space Used %",
-              "value": "${((disk_total-disk_free)/disk_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host Memory Used %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "mem_total",
-              "metric_path": "metrics/memory/mem_total",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_free",
-              "metric_path": "metrics/memory/mem_free",
-              "service_name": "STACK"
-            },
-            {
-              "name": "mem_cached",
-              "metric_path": "metrics/memory/mem_cached",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${((mem_total-mem_free-mem_cached)/mem_total)*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        },
-        {
-          "widget_name": "Host CPU Wait IO %",
-          "description": "",
-          "widget_type": "HEATMAP",
-          "is_visible": false,
-          "metrics": [
-            {
-              "name": "cpu_wio",
-              "metric_path": "metrics/cpu/cpu_wio",
-              "service_name": "STACK"
-            }
-          ],
-          "values": [
-            {
-              "name": "Host Memory Used %",
-              "value": "${cpu_wio*100}"
-            }
-          ],
-          "properties": {
-            "display_unit": "%",
-            "max_limit": "100"
-          }
-        }
-      ]
-    }
-  ]
-}