Posted to commits@ambari.apache.org by ao...@apache.org on 2017/11/27 12:59:56 UTC

[01/16] ambari git commit: AMBARI-22494 Unable to install the cluster. (atkach)

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-22457 bc8b3916b -> 606d876b6


AMBARI-22494 Unable to install the cluster. (atkach)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b7ed9aaa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b7ed9aaa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b7ed9aaa

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: b7ed9aaa517d6da788d073e85f4198697e0fc9ca
Parents: 0615fa0
Author: Andrii Tkach <at...@apache.org>
Authored: Wed Nov 22 12:23:10 2017 +0200
Committer: Andrii Tkach <at...@apache.org>
Committed: Wed Nov 22 12:23:10 2017 +0200

----------------------------------------------------------------------
 ambari-web/app/controllers/wizard/step3_controller.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b7ed9aaa/ambari-web/app/controllers/wizard/step3_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step3_controller.js b/ambari-web/app/controllers/wizard/step3_controller.js
index fb41516..abc745d 100644
--- a/ambari-web/app/controllers/wizard/step3_controller.js
+++ b/ambari-web/app/controllers/wizard/step3_controller.js
@@ -1093,7 +1093,7 @@ App.WizardStep3Controller = Em.Controller.extend(App.ReloadPopupMixin, {
           return {
             hostName: Em.get(task, 'Tasks.host_name'),
             transparentHugePage: Em.get(task, 'Tasks.structured_out.transparentHugePage.message'),
-            installedPackages: installed_packages ? installed_packages : []
+            installedPackages: installed_packages && Array.isArray(installed_packages) ? installed_packages : []
           };
         }));
 
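The one-line change above only trusts 'installed_packages' when it is actually an array; any other value falls back to an empty list before it reaches the wizard UI. For illustration only, a minimal Python sketch of the same defensive normalization (not the project's code; the function and field names here are hypothetical):

    def normalize_installed_packages(structured_out):
        """Return the installed-packages list from a task's structured output,
        or an empty list when the field is missing or not a list."""
        value = (structured_out or {}).get("installed_packages")
        return value if isinstance(value, list) else []

    # A malformed value no longer propagates as-is:
    assert normalize_installed_packages({"installed_packages": None}) == []
    assert normalize_installed_packages({"installed_packages": ["krb5", "hive"]}) == ["krb5", "hive"]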


[06/16] ambari git commit: AMBARI-22498. Remove trailing lines (if any) from 'llapstatus' command output before converting it to JSON.

Posted by ao...@apache.org.
AMBARI-22498. Remove trailing lines (if any) from 'llapstatus' command output before converting it to JSON.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/677e27e6
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/677e27e6
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/677e27e6

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 677e27e658b1fe6e272789ce97ecfbaa5ad0d0ca
Parents: 416570d
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Wed Nov 22 10:24:38 2017 -0800
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Wed Nov 22 10:26:53 2017 -0800

----------------------------------------------------------------------
 .../package/scripts/hive_server_interactive.py  | 20 +++++++--
 .../HIVE/running_withMOTDmsg_andTrailingMsg.txt | 46 ++++++++++++++++++++
 .../stacks/2.5/HIVE/test_hive_server_int.py     | 21 +++++++++
 3 files changed, 84 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/677e27e6/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
index 57cbcd0..32322cd 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
@@ -416,13 +416,14 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
 
 
     """
-    Remove extra lines from 'llapstatus' status output (eg: because of MOTD logging) so as to have a valid JSON data to be passed in
-    to JSON converter.
+    Remove extra lines (beginning/end) from 'llapstatus' status output (e.g., because of MOTD logging) so as to have
+    valid JSON data to pass to the JSON converter.
     """
     def _make_valid_json(self, output):
       '''
 
-      Note: It is assumed right now that extra lines will be only at the start and not at the end.
+      Note: Extra lines (e.g., because of MOTD, or other logging appended afterwards) may appear at the start
+      or at the end of the passed-in data.
 
       Sample expected JSON to be passed for 'loads' is either of the form :
 
@@ -458,6 +459,19 @@ class HiveServerInteractiveDefault(HiveServerInteractive):
       if (len_splits < 3):
         raise Fail ("Malformed JSON data received from 'llapstatus' command. Exiting ....")
 
+      # First, remove extra lines from the END.
+      updated_splits = []
+      for itr, line in enumerate(reversed(splits)):
+        if line == "}": # Our assumption of end of JSON data.
+          updated_splits = splits[:-itr]
+          break
+
+      if len(updated_splits) > 0:
+        splits = updated_splits
+        len_splits = len(splits)
+
+
+      # Second, remove extra lines from the BEGINNING.
       marker_idx = None # To detect where from to start reading for JSON data
       for idx, split in enumerate(splits):
         curr_elem = split.strip()

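Conceptually, the hunk above drops any lines after the closing '}' and, as before, any lines before the start of the JSON document, then hands the remainder to the JSON parser. A standalone Python sketch of that trimming idea, assuming the payload is a single top-level JSON object (illustrative only, not the project's code):

    import json

    def strip_non_json(output):
        """Strip leading/trailing non-JSON lines (e.g. MOTD banners or trailing
        log messages) and return the parsed JSON object."""
        lines = output.splitlines()
        start = next(i for i, line in enumerate(lines) if line.lstrip().startswith("{"))
        end = max(i for i, line in enumerate(lines) if line.strip() == "}")
        return json.loads("\n".join(lines[start:end + 1]))

    sample = "MOTD banner\n{\n  \"state\" : \"RUNNING_ALL\"\n}\n# trailing message"
    assert strip_non_json(sample)["state"] == "RUNNING_ALL"
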
http://git-wip-us.apache.org/repos/asf/ambari/blob/677e27e6/ambari-server/src/test/python/stacks/2.5/HIVE/running_withMOTDmsg_andTrailingMsg.txt
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/HIVE/running_withMOTDmsg_andTrailingMsg.txt b/ambari-server/src/test/python/stacks/2.5/HIVE/running_withMOTDmsg_andTrailingMsg.txt
new file mode 100644
index 0000000..394faef
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.5/HIVE/running_withMOTDmsg_andTrailingMsg.txt
@@ -0,0 +1,46 @@
+######## Hortonworks #############
+This is MOTD message, added for testing in qe infra
+{
+  "amInfo" : {
+    "appName" : "llap",
+    "appType" : "org-apache-slider",
+    "appId" : "application_1455662455106_10882",
+    "containerId" : "container_e14_1455662455106_10882_01_000001",
+    "hostname" : "HOST_REPLACED",
+    "amWebUrl" : "http://HOST_REPLACED:1025/"
+  },
+  "state" : "RUNNING_ALL",
+  "originalConfigurationPath" : "hdfs://HOST_REPLACED:8020/user/USER_REPLACED/.slider/cluster/llap/snapshot",
+  "generatedConfigurationPath" : "hdfs://HOST_REPLACED:8020/user/USER_REPLACED/.slider/cluster/llap/generated",
+  "desiredInstances" : 3,
+  "liveInstances" : 3,
+  "appStartTime" : 1459625802169,
+  "llapInstances" : [ {
+    "hostname" : "HOST_REPLACED",
+    "containerId" : "container_e14_1455662455106_10882_01_000003",
+    "statusUrl" : "http://HOST_REPLACED:15002/status",
+    "webUrl" : "http://HOST_REPLACED:15002",
+    "rpcPort" : 15001,
+    "mgmtPort" : 15004,
+    "shufflePort" : 15551
+  }, {
+    "hostname" : "HOST_REPLACED",
+    "containerId" : "container_e14_1455662455106_10882_01_000002",
+    "statusUrl" : "http://HOST_REPLACED:15002/status",
+    "webUrl" : "http://HOST_REPLACED:15002",
+    "rpcPort" : 15001,
+    "mgmtPort" : 15004,
+    "shufflePort" : 15551
+  }, {
+    "hostname" : "HOST_REPLACED",
+    "containerId" : "container_e14_1455662455106_10882_01_000004",
+    "statusUrl" : "http://HOST_REPLACED:15002/status",
+    "webUrl" : "http://HOST_REPLACED:15002",
+    "rpcPort" : 15001,
+    "mgmtPort" : 15004,
+    "shufflePort" : 15551
+  } ]
+}
+
+# THIS IS A DUMMY TRAILING MESSAGE 1
+# THIS IS A DUMMY TRAILING MESSAGE 2
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/677e27e6/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py b/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
index cf79ec7..4eb16c2 100644
--- a/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
+++ b/ambari-server/src/test/python/stacks/2.5/HIVE/test_hive_server_int.py
@@ -915,6 +915,27 @@ class TestHiveServerInteractive(RMFTestCase):
 
 
 
+  # Tests for function '_make_valid_json()' : it will be passed 'llapstatus' output which is :
+  #     (1). A string parseable as JSON, except that it also contains (2) and/or (3).
+  #     (2). Extra lines at the beginning (e.g., from embedded MOTD logging)
+  #          AND/OR
+  #     (3). Extra lines at the end.
+
+  # Beginning and end lines need to be removed before the output is parsed as JSON
+  def test_make_valid_json_11(self):
+      # Setting up input for fn. '_make_valid_json()'
+      input_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/running_withMOTDmsg_andTrailingMsg.txt","r")
+      llap_app_info = input_file_handle.read()
+      llap_app_info_as_json = self.hsi._make_valid_json(llap_app_info)
+
+      # Set up expected output
+      expected_output_file_handle = open(self.get_src_folder() + "/test/python/stacks/2.5/HIVE/running.json","r")
+      expected_output_data = expected_output_file_handle.read()
+      expected_output_data_as_json = json.loads(expected_output_data)
+
+      # Verification
+      self.assertEqual(llap_app_info_as_json, expected_output_data_as_json)
+
 
   # Tests for fn : 'check_llap_app_status_in_hdp_tp()'
 


[15/16] ambari git commit: AMBARI-22517. NPE during Ambari schema upgrade while updating Hive configs.

Posted by ao...@apache.org.
AMBARI-22517. NPE during Ambari schema upgrade while updating Hive configs.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c6825e91
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c6825e91
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c6825e91

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: c6825e913b7f0421b88c8753d73ead0025c6031c
Parents: 5fab8ff
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon Nov 27 00:07:15 2017 -0800
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Mon Nov 27 00:40:13 2017 -0800

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog260.java       | 38 ++++++++++----------
 1 file changed, 20 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c6825e91/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
index 5831565..35f9f65 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
@@ -775,26 +775,28 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
           // hive-interactive-site/hive.llap.zk.sm.keytab.file and hive-interactive-site/hive.llap.task.keytab.file respectively,
           // based on what hive-interactive-site/hive.llap.daemon.keytab.file has.
           Config hsiSiteConfig = cluster.getDesiredConfigByType(HIVE_INTERACTIVE_SITE);
-          Map<String, String> hsiSiteConfigProperties = hsiSiteConfig.getProperties();
-          if (hsiSiteConfigProperties != null &&
-                  hsiSiteConfigProperties.containsKey(HIVE_LLAP_DAEMON_KEYTAB_FILE)) {
-            String[] identities = {HIVE_LLAP_ZK_SM_KEYTAB_FILE, HIVE_LLAP_TASK_KEYTAB_FILE};
-            Map<String, String> newProperties = new HashMap<>();
-            for (String identity : identities) {
-              // Update only if we were able to modify the corresponding kerberos descriptor,
-              // reflected in list 'getYarnKerberosDescUpdatedList'.
-              if (getYarnKerberosDescUpdatedList().contains(identity) && hsiSiteConfigProperties.containsKey(identity)) {
-                newProperties.put(identity, hsiSiteConfigProperties.get(HIVE_LLAP_DAEMON_KEYTAB_FILE));
+          if (hsiSiteConfig != null) {
+            Map<String, String> hsiSiteConfigProperties = hsiSiteConfig.getProperties();
+            if (hsiSiteConfigProperties != null &&
+                    hsiSiteConfigProperties.containsKey(HIVE_LLAP_DAEMON_KEYTAB_FILE)) {
+              String[] identities = {HIVE_LLAP_ZK_SM_KEYTAB_FILE, HIVE_LLAP_TASK_KEYTAB_FILE};
+              Map<String, String> newProperties = new HashMap<>();
+              for (String identity : identities) {
+                // Update only if we were able to modify the corresponding kerberos descriptor,
+                // reflected in list 'getYarnKerberosDescUpdatedList'.
+                if (getYarnKerberosDescUpdatedList().contains(identity) && hsiSiteConfigProperties.containsKey(identity)) {
+                  newProperties.put(identity, hsiSiteConfigProperties.get(HIVE_LLAP_DAEMON_KEYTAB_FILE));
+                }
               }
-            }
 
-            // Update step.
-            if (newProperties.size() > 0) {
-              try {
-                updateConfigurationPropertiesForCluster(cluster, HIVE_INTERACTIVE_SITE, newProperties, true, false);
-                LOG.info("Updated HSI config(s) : " + newProperties.keySet() + " with value(s) = " + newProperties.values()+" respectively.");
-              } catch (AmbariException e) {
-                e.printStackTrace();
+              // Update step.
+              if (newProperties.size() > 0) {
+                try {
+                  updateConfigurationPropertiesForCluster(cluster, HIVE_INTERACTIVE_SITE, newProperties, true, false);
+                  LOG.info("Updated HSI config(s) : " + newProperties.keySet() + " with value(s) = " + newProperties.values() + " respectively.");
+                } catch (AmbariException e) {
+                  e.printStackTrace();
+                }
               }
             }
           }

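The guard added above simply skips the keytab rewrite when the cluster has no hive-interactive-site desired config, instead of dereferencing a null Config during schema upgrade. A small, hypothetical Python sketch of the same pattern (illustrative only; the actual upgrade logic is the Java shown above, and these names are assumptions):

    def derive_llap_keytab_properties(hsi_site_config, updated_identities):
        """Return the new keytab properties to set, or an empty dict when the
        hive-interactive-site config (or the daemon keytab) is absent."""
        if hsi_site_config is None:  # the NPE fix: bail out early
            return {}
        props = hsi_site_config.get("properties") or {}
        daemon_keytab = props.get("hive.llap.daemon.keytab.file")
        if daemon_keytab is None:
            return {}
        identities = ["hive.llap.zk.sm.keytab.file", "hive.llap.task.keytab.file"]
        return {i: daemon_keytab for i in identities
                if i in updated_identities and i in props}

    # With no hive-interactive-site config at all, nothing is updated (previously an NPE):
    assert derive_llap_keytab_properties(None, set()) == {}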

[11/16] ambari git commit: AMBARI-20891 - Allow extensions to auto-link with supported stack versions

Posted by ao...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..649472d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..2b979d7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>#of failed disks dn would tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The name of the default file system.  Either the
+literal string "local" or a host:port for HDFS.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for an HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description></description>
+</property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..da61660
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/package/dummy-script.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..9c122b2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HIVE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <extends>common-services/HIVE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 0000000..3b0b3d9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..9c8a299
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <extends>common-services/ZOOKEEPER/1.0</extends>
+    </service>
+  </services>
+</metainfo>


[14/16] ambari git commit: AMBARI-22220 - Should be able to switch the extension version to which a stack version is linked

Posted by ao...@apache.org.
AMBARI-22220 - Should be able to switch the extension version to which a stack version is linked

Conflicts:
	ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5fab8ff3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5fab8ff3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5fab8ff3

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 5fab8ff3d768c2edf8408361feba87bc09b02bf2
Parents: d7b25ee
Author: Tim Thorpe <tt...@apache.org>
Authored: Mon Oct 16 06:16:35 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Fri Nov 24 12:20:38 2017 -0800

----------------------------------------------------------------------
 .../controller/AmbariManagementController.java  |   6 +-
 .../AmbariManagementControllerImpl.java         |  44 ++++---
 .../controller/AmbariManagementHelper.java      |  26 ++++
 .../internal/ExtensionLinkResourceProvider.java |  15 +++
 .../ambari/server/stack/ExtensionHelper.java    |  31 ++++-
 .../server/stack/StackManagerExtensionTest.java |  22 +++-
 .../resources/extensions/EXT/0.2/metainfo.xml   |   4 +-
 .../resources/extensions/EXT/0.4/metainfo.xml   |  32 +++++
 .../EXT/0.4/services/OOZIE2/metainfo.xml        | 118 +++++++++++++++++++
 .../services/OOZIE2/themes/broken_theme.json    |   3 +
 10 files changed, 271 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
index 54c0848..9ac48a4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementController.java
@@ -369,18 +369,18 @@ public interface AmbariManagementController {
   public void createExtensionLink(ExtensionLinkRequest request) throws AmbariException;
 
   /**
-   * Update a link between an extension and a stack
+   * Update a link - switch the link's extension version while keeping the same stack version and extension name
    *
    * @throws AmbariException if we fail to link the extension to the stack
    */
   public void updateExtensionLink(ExtensionLinkRequest request) throws AmbariException;
 
   /**
-   * Update a link between an extension and a stack
+   * Update a link - switch the link's extension version while keeping the same stack version and extension name
    *
    * @throws AmbariException if we fail to link the extension to the stack
    */
-  public void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException;
+  void updateExtensionLink(ExtensionLinkEntity oldLinkEntity, ExtensionLinkRequest newLinkRequest) throws AmbariException;
 
   /**
    * Delete a link between an extension and a stack

http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index 585ee46..e294aeb 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -5656,12 +5656,9 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
   }
 
   /**
-   * This method will update a link between an extension version and a stack version (Extension Link).
-   * Updating will only force ambari server to reread the stack and extension directories.
+   * Update a link - switch the link's extension version while keeping the same stack version and extension name
    *
-   * An extension version is like a stack version but it contains custom services.  Linking an extension
-   * version to the current stack version allows the cluster to install the custom services contained in
-   * the extension version.
+   * @throws AmbariException if we fail to link the extension to the stack
    */
   @Override
   public void updateExtensionLink(ExtensionLinkRequest request) throws AmbariException {
@@ -5675,32 +5672,43 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       throw new AmbariException("Unable to find extension link"
             + ", linkId=" + request.getLinkId(), e);
     }
-    updateExtensionLink(linkEntity);
+    updateExtensionLink(linkEntity, request);
   }
 
   /**
-   * This method will update a link between an extension version and a stack version (Extension Link).
-   * Updating will only force ambari server to reread the stack and extension directories.
+   * Update a link - switch the link's extension version while keeping the same stack version and extension name
    *
-   * An extension version is like a stack version but it contains custom services.  Linking an extension
-   * version to the current stack version allows the cluster to install the custom services contained in
-   * the extension version.
+   * @throws AmbariException if we fail to link the extension to the stack
    */
   @Override
-  public void updateExtensionLink(ExtensionLinkEntity linkEntity) throws AmbariException {
-    StackInfo stackInfo = ambariMetaInfo.getStack(linkEntity.getStack().getStackName(), linkEntity.getStack().getStackVersion());
+  public void updateExtensionLink(ExtensionLinkEntity oldLinkEntity, ExtensionLinkRequest newLinkRequest) throws AmbariException {
+    StackInfo stackInfo = ambariMetaInfo.getStack(oldLinkEntity.getStack().getStackName(), oldLinkEntity.getStack().getStackVersion());
 
     if (stackInfo == null) {
-      throw new StackAccessException("stackName=" + linkEntity.getStack().getStackName() + ", stackVersion=" + linkEntity.getStack().getStackVersion());
+      throw new StackAccessException(String.format("stackName=%s, stackVersion=%s", oldLinkEntity.getStack().getStackName(), oldLinkEntity.getStack().getStackVersion()));
     }
 
-    ExtensionInfo extensionInfo = ambariMetaInfo.getExtension(linkEntity.getExtension().getExtensionName(), linkEntity.getExtension().getExtensionVersion());
+    if (newLinkRequest.getExtensionName() == null || newLinkRequest.getExtensionVersion() == null) {
+      throw new AmbariException(String.format("Invalid extension name or version: %s/%s",
+		  newLinkRequest.getExtensionName(), newLinkRequest.getExtensionVersion()));
+    }
 
-    if (extensionInfo == null) {
-      throw new StackAccessException("extensionName=" + linkEntity.getExtension().getExtensionName() + ", extensionVersion=" + linkEntity.getExtension().getExtensionVersion());
+    if (!newLinkRequest.getExtensionName().equals(oldLinkEntity.getExtension().getExtensionName())) {
+      throw new AmbariException(String.format("Update is not allowed to switch the extension name, only the version.  Old name/new name: %s/%s",
+		  oldLinkEntity.getExtension().getExtensionName(), newLinkRequest.getExtensionName()));
+    }
+
+    ExtensionInfo oldExtensionInfo = ambariMetaInfo.getExtension(oldLinkEntity.getExtension().getExtensionName(), oldLinkEntity.getExtension().getExtensionVersion());
+    ExtensionInfo newExtensionInfo = ambariMetaInfo.getExtension(newLinkRequest.getExtensionName(), newLinkRequest.getExtensionVersion());
+
+    if (oldExtensionInfo == null) {
+      throw new StackAccessException(String.format("Old extensionName=%s, extensionVersion=%s", oldLinkEntity.getExtension().getExtensionName(), oldLinkEntity.getExtension().getExtensionVersion()));
+    }
+    if (newExtensionInfo == null) {
+      throw new StackAccessException(String.format("New extensionName=%s, extensionVersion=%s", newLinkRequest.getExtensionName(), newLinkRequest.getExtensionVersion()));
     }
 
-    ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
+    helper.updateExtensionLink(ambariMetaInfo.getStackManager(), oldLinkEntity, stackInfo, oldExtensionInfo, newExtensionInfo);
   }
 
   @Override

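Taken together, the reworked controller method narrows what an update may do: the stack side of the link and the extension name stay fixed, and only the extension version may be switched, provided both the currently linked and the requested extension versions are registered. A rough sketch of those preconditions, written in Python purely for brevity (LinkUpdateError, old_link, request and known_extensions are illustrative names, not part of Ambari's API):

    class LinkUpdateError(Exception):
        """Illustrative stand-in for AmbariException / StackAccessException."""

    def validate_link_update(old_link, request, known_extensions):
        # The request must carry both an extension name and a version.
        if request.extension_name is None or request.extension_version is None:
            raise LinkUpdateError("Invalid extension name or version")
        # Only the version may change; the extension name must stay the same.
        if request.extension_name != old_link.extension_name:
            raise LinkUpdateError("An update may switch the version, not the extension name")
        # Both the currently linked version and the requested one must be known.
        for version in (old_link.extension_version, request.extension_version):
            if (request.extension_name, version) not in known_extensions:
                raise LinkUpdateError("Unknown extension %s/%s" % (request.extension_name, version))

Only after these checks does the controller delegate to AmbariManagementHelper.updateExtensionLink, shown in the AmbariManagementHelper diff that follows.
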
http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
index 0c8edfe..e98c2e9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
@@ -162,6 +162,32 @@ public class AmbariManagementHelper {
     }
   }
 
+  /**
+   * Updates the extension version of the currently linked extension to the stack version
+   */
+  public void updateExtensionLink(StackManager stackManager, ExtensionLinkEntity linkEntity, StackInfo stackInfo,
+                                  ExtensionInfo oldExtensionInfo, ExtensionInfo newExtensionInfo) throws AmbariException {
+    //validateUpdateExtensionLinkRequest(stackInfo, extensionInfo);
+    ExtensionHelper.validateUpdateLink(stackManager, stackInfo, oldExtensionInfo, newExtensionInfo);
+
+    ExtensionEntity extension = extensionDAO.find(newExtensionInfo.getName(), newExtensionInfo.getVersion());
+    linkEntity.setExtension(extension);
+
+    try {
+      linkEntity = linkDAO.merge(linkEntity);
+    } catch (RollbackException e) {
+      String message = "Unable to update extension link";
+      LOG.debug(message, e);
+      String errorMessage = message
+              + ", stackName=" + stackInfo.getName()
+              + ", stackVersion=" + stackInfo.getVersion()
+              + ", extensionName=" + newExtensionInfo.getName()
+              + ", extensionVersion=" + newExtensionInfo.getVersion();
+      LOG.warn(errorMessage);
+      throw new AmbariException(errorMessage, e);
+    }
+  }
+
   private ExtensionLinkEntity createExtensionLinkEntity(StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
     StackEntity stack = stackDAO.find(stackInfo.getName(), stackInfo.getVersion());
     ExtensionEntity extension = extensionDAO.find(extensionInfo.getName(), extensionInfo.getVersion());

http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionLinkResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionLinkResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionLinkResourceProvider.java
index 67cc972..82e70f6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionLinkResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ExtensionLinkResourceProvider.java
@@ -188,6 +188,21 @@ public class ExtensionLinkResourceProvider extends AbstractControllerResourcePro
         throws SystemException, UnsupportedPropertyException,
         NoSuchResourceException, NoSuchParentResourceException {
 
+    final Set<ExtensionLinkRequest> requests = new HashSet<>();
+    for (Map<String, Object> propertyMap : request.getProperties()) {
+      requests.add(getRequest(propertyMap));
+    }
+
+    RequestStatusResponse response = modifyResources(new Command<RequestStatusResponse>() {
+      @Override
+      public RequestStatusResponse invoke() throws AmbariException {
+        for (ExtensionLinkRequest extensionLinkRequest : requests) {
+          getManagementController().updateExtensionLink(extensionLinkRequest);
+        }
+        return null;
+      }
+    });
+
     //Need to reread the stacks/extensions directories so the latest information is available
     try {
       getManagementController().updateStacks();

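For completeness, this is the change that lets update requests against extension links actually take effect; before it, updateResourcesAuthorized only re-read the stack and extension directories. A hypothetical client call could look like the sketch below. The /api/v1/links URL, the ExtensionLink/* field names and the credentials are assumptions made for illustration (none of them appear in this diff), so the Ambari extensions documentation remains the authority:

    import requests

    AMBARI = "http://ambari.example.com:8080"   # hypothetical server address
    AUTH = ("admin", "admin")                   # hypothetical credentials
    HEADERS = {"X-Requested-By": "ambari"}

    # Ask the server to switch an existing link from EXT 0.1 to EXT 0.2.
    payload = {
        "ExtensionLink": {
            "link_id": 1,
            "stack_name": "HDP",
            "stack_version": "0.1",
            "extension_name": "EXT",
            "extension_version": "0.2",
        }
    }

    response = requests.put(AMBARI + "/api/v1/links/1", json=payload,
                            auth=AUTH, headers=HEADERS)
    response.raise_for_status()
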
http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
index 8e1d989..89d0f61 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
@@ -18,6 +18,9 @@
 
 package org.apache.ambari.server.stack;
 
+import java.util.ArrayList;
+import java.util.Collection;
+
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ServiceNotFoundException;
 import org.apache.ambari.server.state.Cluster;
@@ -72,6 +75,12 @@ public class ExtensionHelper {
     validateRequiredExtensions(stack, extension);
   }
 
+  public static void validateUpdateLink(StackManager stackManager, StackInfo stack, ExtensionInfo oldExtension, ExtensionInfo newExtension) throws AmbariException {
+    validateSupportedStackVersion(stack, newExtension);
+    validateServiceDuplication(stackManager, stack, oldExtension, newExtension);
+    validateRequiredExtensions(stack, newExtension);
+  }
+
   private static void validateSupportedStackVersion(StackInfo stack, ExtensionInfo extension) throws AmbariException {
     for (ExtensionMetainfoXml.Stack validStack : extension.getStacks()) {
       if (validStack.getName().equals(stack.getName())) {
@@ -93,8 +102,28 @@ public class ExtensionHelper {
   }
 
   private static void validateServiceDuplication(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    validateServiceDuplication(stackManager, stack, extension, extension.getServices());
+  }
+
+  private static void validateServiceDuplication(StackManager stackManager, StackInfo stack, ExtensionInfo oldExtension, ExtensionInfo newExtension) throws AmbariException {
+    ArrayList<ServiceInfo> services = new ArrayList<>(newExtension.getServices().size());
+    for (ServiceInfo service : newExtension.getServices()) {
+      boolean found = false;
+      for (ServiceInfo current : oldExtension.getServices()) {
+        if (service.getName().equals(current.getName())) {
+          found = true;
+        }
+      }
+      if (!found) {
+        services.add(service);
+      }
+    }
+    validateServiceDuplication(stackManager, stack, newExtension, services);
+  }
+
+  private static void validateServiceDuplication(StackManager stackManager, StackInfo stack, ExtensionInfo extension, Collection<ServiceInfo> services) throws AmbariException {
     LOG.debug("Looking for duplicate services");
-    for (ServiceInfo service : extension.getServices()) {
+    for (ServiceInfo service : services) {
       LOG.debug("Looking for duplicate service " + service.getName());
       if (service != null) {
         ServiceInfo stackService = null;

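The extra overload matters because the stack already contains the services merged in from the currently linked extension version, so re-checking those services during an update would always report a false duplicate; only services that are new in the target extension version need to be validated against the stack. A small sketch of that filtering step (Python for brevity; the names are made up and the authoritative logic is the Java above):

    def services_to_check(old_services, new_services):
        """Keep only services added by the new extension version."""
        old_names = {svc.name for svc in old_services}
        return [svc for svc in new_services if svc.name not in old_names]
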
http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 34522da..1f081ec 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -85,6 +85,9 @@ public class StackManagerExtensionTest  {
     ExtensionEntity extension3 = new ExtensionEntity();
     extension3.setExtensionName("EXT");
     extension3.setExtensionVersion("0.3");
+    ExtensionEntity extension4 = new ExtensionEntity();
+    extension4.setExtensionName("EXT");
+    extension4.setExtensionVersion("0.4");
     ExtensionLinkEntity link1 = new ExtensionLinkEntity();
     link1.setLinkId(new Long(-1));
     link1.setStack(stack1);
@@ -100,6 +103,7 @@ public class StackManagerExtensionTest  {
     expect(extensionDao.find("EXT", "0.1")).andReturn(extension1).atLeastOnce();
     expect(extensionDao.find("EXT", "0.2")).andReturn(extension2).atLeastOnce();
     expect(extensionDao.find("EXT", "0.3")).andReturn(extension3).atLeastOnce();
+    expect(extensionDao.find("EXT", "0.4")).andReturn(extension4).atLeastOnce();
 
     expect(linkDao.findByStack("HDP", "0.1")).andReturn(linkList).atLeastOnce();
     expect(linkDao.findByStack(EasyMock.anyObject(String.class),
@@ -108,6 +112,8 @@ public class StackManagerExtensionTest  {
     expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", "0.2")).andReturn(null).atLeastOnce();
     expect(linkDao.findByStackAndExtension("HDP", "0.1", "EXT", "0.1")).andReturn(link1).atLeastOnce();
 
+    expect(linkDao.merge(link1)).andReturn(link1).atLeastOnce();
+
     replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao); //linkEntity
 
     String stacks = ClassLoader.getSystemClassLoader().getResource("stacks_with_extensions").getPath();
@@ -151,7 +157,7 @@ public class StackManagerExtensionTest  {
     assertNotNull("EXT 0.2's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
     assertEquals("EXT 0.2's parent: " + extension.getParentExtensionVersion(), "0.1", extension.getParentExtensionVersion());
     assertNotNull(extension.getService("OOZIE2"));
-    assertTrue("Extension is not set to auto link", extension.isAutoLink());
+    assertTrue("Extension is set to auto link", !extension.isAutoLink());
     oozie = extension.getService("OOZIE2");
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
@@ -166,18 +172,24 @@ public class StackManagerExtensionTest  {
     assertNotNull(themes);
     assertTrue("Number of themes is " + themes.size(), themes.size() == 0);
 
+    extension = stackManager.getExtension("EXT", "0.3");
+    assertTrue("Extension is not set to auto link", extension.isAutoLink());
+
     StackInfo stack = stackManager.getStack("HDP", "0.1");
     assertNotNull(stack.getService("OOZIE2"));
     oozie = stack.getService("OOZIE2");
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
     assertEquals(oozie.getVersion(), "3.2.0");
-
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
     assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.1");
 
+    ExtensionInfo extensionInfo2 = stackManager.getExtension("EXT", "0.2");
+    helper.updateExtensionLink(stackManager, link1, stack, extension, extensionInfo2);
+    assertEquals(link1.getExtension().getExtensionVersion(), link1.getExtension().getExtensionVersion(), "0.2");
+
     stack = stackManager.getStack("HDP", "0.2");
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 0);
 
@@ -187,15 +199,13 @@ public class StackManagerExtensionTest  {
     assertNotNull(extension.getService("OOZIE2"));
     oozie = extension.getService("OOZIE2");
     assertEquals(oozie.getVersion(), "4.0.0");
-
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
-    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.4");
 
     stack = stackManager.getStack("HDP", "0.4");
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
-    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.4");
   }
-
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
index c95a20f..fa84c53 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
@@ -20,12 +20,12 @@
     <active>true</active>
   </versions>
   <extends>0.1</extends>
-  <auto-link>true</auto-link>
+  <auto-link>false</auto-link>
   <prerequisites>
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.3</version>
+        <version>0.1</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/test/resources/extensions/EXT/0.4/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.4/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.4/metainfo.xml
new file mode 100644
index 0000000..0e74813
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.4/metainfo.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>true</active>
+  </versions>
+  <extends>0.3</extends>
+  <auto-link>true</auto-link>
+  <prerequisites>
+    <min-stack-versions>
+      <stack>
+        <name>HDP</name>
+        <version>0.3</version>
+      </stack>
+    </min-stack-versions>
+  </prerequisites>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/test/resources/extensions/EXT/0.4/services/OOZIE2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.4/services/OOZIE2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.4/services/OOZIE2/metainfo.xml
new file mode 100644
index 0000000..9176551
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.4/services/OOZIE2/metainfo.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE2</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
+      <version>4.0.0</version>
+
+      <components>
+        <component>
+          <name>OOZIE2_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE2_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>oozie2.noarch</name>
+            </package>
+            <package>
+              <name>oozie2-client.noarch</name>
+            </package>
+            <package>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie2-site</config-type>
+      </configuration-dependencies>
+
+      <themes>
+        <theme>
+          <fileName>broken_theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5fab8ff3/ambari-server/src/test/resources/extensions/EXT/0.4/services/OOZIE2/themes/broken_theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.4/services/OOZIE2/themes/broken_theme.json b/ambari-server/src/test/resources/extensions/EXT/0.4/services/OOZIE2/themes/broken_theme.json
new file mode 100644
index 0000000..6e8b5bf
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.4/services/OOZIE2/themes/broken_theme.json
@@ -0,0 +1,3 @@
+{
+  "configuration": {
+}


[02/16] ambari git commit: AMBARI-22497 Disk usage is not updated. (dgrinenko)

Posted by ao...@apache.org.
AMBARI-22497 Disk usage is not updated. (dgrinenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7693095
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7693095
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7693095

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: f76930957f070fe9e6126ed26de97093229cfd05
Parents: b7ed9aa
Author: Dmytro Grinenko <ha...@apache.org>
Authored: Wed Nov 22 15:54:21 2017 +0200
Committer: Dmytro Grinenko <ha...@apache.org>
Committed: Wed Nov 22 15:54:21 2017 +0200

----------------------------------------------------------------------
 .../src/main/python/ambari_agent/Hardware.py    | 130 +++++++++++--------
 .../src/main/python/ambari_agent/Heartbeat.py   |   8 +-
 .../src/main/python/ambari_agent/HostInfo.py    |  83 ++++++------
 .../test/python/ambari_agent/TestHardware.py    |  56 ++++++--
 .../python/ambari_agent/TestRegistration.py     |   5 +-
 5 files changed, 165 insertions(+), 117 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f7693095/ambari-agent/src/main/python/ambari_agent/Hardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Hardware.py b/ambari-agent/src/main/python/ambari_agent/Hardware.py
index 696438e..56ce872 100644
--- a/ambari-agent/src/main/python/ambari_agent/Hardware.py
+++ b/ambari-agent/src/main/python/ambari_agent/Hardware.py
@@ -44,55 +44,72 @@ class Hardware:
   IGNORE_DEVICES = ["proc", "tmpfs", "cgroup", "mqueue", "shm"]
   LINUX_PATH_SEP = "/"
 
-  def __init__(self, config):
+  def __init__(self, config=None, cache_info=True):
+    """
+    Initialize hardware object with available metrics. Metrics cache could be
+     disabled by setting cache_info to False
+
+    :param config Ambari Agent Configuration
+    :param cache_info initialize hardware dictionary with available metrics
+
+    :type config AmbariConfig
+    :type cache_info bool
+    """
+    self.config = config
+    self._hardware = None
+
+    if cache_info:
+      self._cache_hardware_info()
+
+  def _cache_hardware_info(self):
+    """
+    Creating cache with hardware information
+    """
     logger.info("Initializing host system information.")
-    self.hardware = {
-      'mounts': Hardware.osdisks()
+    self._hardware = {
+      'mounts': self.osdisks()
     }
-    self.config = config
-    self.hardware.update(Facter(self.config).facterInfo())
-    logger.info("Host system information: %s", self.hardware)
+    self._hardware.update(Facter(self.config).facterInfo())
+    logger.info("Host system information: %s", self._hardware)
 
-  @classmethod
-  def _parse_df_line(cls, line):
+  def _parse_df(self, lines):
     """
-      Initialize data-structure from string in specific 'df' command output format
+      Generator, which parses df command output and yields parsed entities
 
       Expected string format:
        device fs_type disk_size used_size available_size capacity_used_percents mount_point
 
-    :type line str
+    :type lines list[str]
+    :rtype collections.Iterable
     """
+    titles = ["device", "type", "size", "used", "available", "percent", "mountpoint"]
 
-    line_split = line.split()
-    if len(line_split) != 7:
-      return None
+    for line in lines:
+      line_split = line.split()
+      if len(line_split) != 7:
+        continue
 
-    titles = ["device", "type", "size", "used", "available", "percent", "mountpoint"]
-    return dict(zip(titles, line_split))
+      yield dict(zip(titles, line_split))
 
-  @classmethod
-  def _get_mount_check_timeout(cls, config=None):
+  def _get_mount_check_timeout(self):
     """Return timeout for df call command"""
-    if config and config.has_option(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY) \
-      and config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY) != "0":
+    if self.config and self.config.has_option(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY) \
+      and self.config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY) != "0":
 
-      return config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY)
+      return self.config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY)
 
     return Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_DEFAULT
 
-  @classmethod
-  def _check_remote_mounts(cls, config=None):
+  def _check_remote_mounts(self):
     """Verify if remote mount allowed to be processed or not"""
-    if config and config.has_option(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY) and \
-       config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY).lower() == "false":
+    if self.config and self.config.has_option(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY) and \
+      self.config.get(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY).lower() == "false":
 
       return False
 
     return True
 
-  @classmethod
-  def _is_mount_blacklisted(cls, blacklist, mount_point):
+  def _is_mount_blacklisted(self, blacklist, mount_point):
     """
     Verify if particular mount point is in the black list.
 
@@ -111,49 +128,44 @@ class Hardware:
     if not blacklist or not mount_point:
       return False
 
-    mount_point_elements = mount_point.split(cls.LINUX_PATH_SEP)
+    # split into path elements so we exclude the possibility of matching only part of a directory name
+    mount_point_elements = mount_point.split(self.LINUX_PATH_SEP)
 
     for el in blacklist:
-      el_list = el.split(cls.LINUX_PATH_SEP)
+      el_list = el.split(self.LINUX_PATH_SEP)
       # compare whole path elements, not raw string prefixes
       if el_list == mount_point_elements[:len(el_list)]:
         return True
 
     return False
 
-
-  @classmethod
   @OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
-  def osdisks(cls, config=None):
+  def osdisks(self):
     """ Run df to find out the disks on the host. Only works on linux
     platforms. Note that this parser ignores any filesystems with spaces
     and any mounts with spaces. """
-    timeout = cls._get_mount_check_timeout(config)
+    timeout = self._get_mount_check_timeout()
     command = ["timeout", timeout, "df", "-kPT"]
     blacklisted_mount_points = []
 
-    if config:
-      ignore_mount_value = config.get("agent", "ignore_mount_points", default="")
-      blacklisted_mount_points = [item.strip() for item in ignore_mount_value.split(",")]
+    if self.config:
+      ignore_mount_value = self.config.get("agent", "ignore_mount_points", default="")
+      blacklisted_mount_points = [item.strip() for item in ignore_mount_value.split(",") if len(item.strip()) != 0]
 
-    if not cls._check_remote_mounts(config):
+    if not self._check_remote_mounts():
       command.append("-l")
 
     try:
-      code, out, err = shell.call(command, stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = int(timeout), quiet = True)
+      code, out, err = shell.call(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=int(timeout), quiet=True)
       dfdata = out
     except Exception as ex:
       logger.warn("Checking disk usage failed: " + str(ex))
       dfdata = ''
 
-    mounts = [cls._parse_df_line(line) for line in dfdata.splitlines() if line]
     result_mounts = []
     ignored_mounts = []
 
-    for mount in mounts:
-      if not mount:
-        continue
-
+    for mount in self._parse_df(dfdata.splitlines()):
       """
       We need to filter mounts by several parameters:
        - mounted device is not in the ignored list
@@ -161,11 +173,11 @@ class Hardware:
        - it is not file-mount (docker environment)
        - mount path or a part of mount path is not in the blacklist
       """
-      if mount["device"] not in cls.IGNORE_DEVICES and\
-         mount["mountpoint"].split("/")[0] not in cls.IGNORE_ROOT_MOUNTS and\
-         cls._chk_writable_mount(mount['mountpoint']) and\
+      if mount["device"] not in self.IGNORE_DEVICES and\
+         mount["mountpoint"].split("/")[0] not in self.IGNORE_ROOT_MOUNTS and\
+         self._chk_writable_mount(mount['mountpoint']) and\
          not path_isfile(mount["mountpoint"]) and\
-         not cls._is_mount_blacklisted(blacklisted_mount_points, mount["mountpoint"]):
+         not self._is_mount_blacklisted(blacklisted_mount_points, mount["mountpoint"]):
 
         result_mounts.append(mount)
       else:
@@ -177,8 +189,7 @@ class Hardware:
 
     return result_mounts
 
-  @classmethod
-  def _chk_writable_mount(cls, mount_point):
+  def _chk_writable_mount(self, mount_point):
     if os.geteuid() == 0:
       return os.access(mount_point, os.W_OK)
     else:
@@ -196,9 +207,8 @@ class Hardware:
         logger.exception("Exception happened while checking mount {0}".format(mount_point))
         return False
     
-  @classmethod
   @OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
-  def osdisks(cls, config=None):
+  def osdisks(self):
     mounts = []
     runner = shellRunner()
     command_result = runner.runPowershell(script_block=Hardware.WINDOWS_GET_DRIVES_CMD)
@@ -216,16 +226,28 @@ class Hardware:
 
     return mounts
 
-  def get(self):
-    return self.hardware
+  def get(self, invalidate_cache=False):
+    """
+    Getting cached hardware information
+
+    :param invalidate_cache resets hardware metrics cache
+    :type invalidate_cache bool
+    """
+    if invalidate_cache:
+      self._hardware = None
+
+    if not self._hardware:
+      self._cache_hardware_info()
+
+    return self._hardware
 
 
 def main():
   from resource_management.core.logger import Logger
   Logger.initialize_logger()
 
-  config = None
-  print Hardware(config).get()
+  print Hardware().get()
+
 
 if __name__ == '__main__':
   main()

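Two details of the refactored class are easy to miss in the diff: _parse_df only accepts lines with exactly seven whitespace-separated fields, and the mount blacklist is matched on whole path elements rather than raw string prefixes, so an entry such as /grid covers /grid/0 but not /grid0. Empty entries produced by an unset ignore_mount_points value are now also dropped before matching. A condensed, self-contained sketch of the parsing and matching behaviour (independent of the agent code, for illustration only):

    DF_COLUMNS = ["device", "type", "size", "used", "available", "percent", "mountpoint"]

    def parse_df(lines):
        # Yield one dict per well-formed `df -kPT` output line (seven fields).
        for line in lines:
            fields = line.split()
            if len(fields) == 7:
                yield dict(zip(DF_COLUMNS, fields))

    def is_blacklisted(blacklist, mountpoint):
        # Element-wise prefix match on path components.
        parts = mountpoint.split("/")
        return any(entry.split("/") == parts[:len(entry.split("/"))] for entry in blacklist)

    sample = "/dev/sda1 ext4 52403200 10485760 41917440 21% /grid/0"
    print(list(parse_df([sample])))              # one parsed mount dictionary
    print(is_blacklisted(["/grid"], "/grid/0"))  # True
    print(is_blacklisted(["/grid"], "/grid0"))   # False
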
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7693095/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Heartbeat.py b/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
index 1e05aae..d7c0325 100644
--- a/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
+++ b/ambari-agent/src/main/python/ambari_agent/Heartbeat.py
@@ -75,14 +75,11 @@ class Heartbeat:
     if int(id) == 0:
       componentsMapped = False
 
-
-
     logger.debug("Building Heartbeat: {responseId = %s, timestamp = %s, "
                 "commandsInProgress = %s, componentsMapped = %s,"
                 "recoveryTimestamp = %s}",
         str(id), str(timestamp), repr(commandsInProgress), repr(componentsMapped), str(recovery_timestamp))
 
-
     logger.debug("Heartbeat: %s", pformat(heartbeat))
 
     hostInfo = HostInfo(self.config)
@@ -93,10 +90,9 @@ class Heartbeat:
       # this must be the last step before returning heartbeat
       hostInfo.register(nodeInfo, componentsMapped, commandsInProgress)
       heartbeat['agentEnv'] = nodeInfo
-      mounts = Hardware.osdisks(self.config)
+      mounts = Hardware(config=self.config, cache_info=False).osdisks()
       heartbeat['mounts'] = mounts
 
-
       logger.debug("agentEnv: %s", str(nodeInfo))
       logger.debug("mounts: %s", str(mounts))
 
@@ -105,6 +101,7 @@ class Heartbeat:
     
     return heartbeat
 
+
 def main(argv=None):
   from ambari_agent.ActionQueue import ActionQueue
   from ambari_agent.AmbariConfig import AmbariConfig
@@ -122,5 +119,6 @@ def main(argv=None):
   heartbeat = Heartbeat(actionQueue)
   print json.dumps(heartbeat.build('3',3))
 
+
 if __name__ == '__main__':
   main()

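The two call sites now use the class differently: registration keeps the cached hardware snapshot, while each heartbeat constructs a throw-away instance with cache_info=False and re-reads only the mounts. A short usage sketch, assuming it runs on a host where the ambari_agent package is importable:

    from ambari_agent.AmbariConfig import AmbariConfig
    from ambari_agent.Hardware import Hardware

    config = AmbariConfig()

    # Registration-style usage: build the full snapshot once and cache it.
    hardware = Hardware(config=config)
    cached = hardware.get()                          # served from the cache
    refreshed = hardware.get(invalidate_cache=True)  # forces a rebuild

    # Heartbeat-style usage: skip the cache and re-read only the mounts.
    mounts = Hardware(config=config, cache_info=False).osdisks()
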
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7693095/ambari-agent/src/main/python/ambari_agent/HostInfo.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostInfo.py b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
index 4b7bfd7..6612c27 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostInfo.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostInfo.py
@@ -56,6 +56,9 @@ class HostInfo(object):
     self.config = config
     self.reportFileHandler = HostCheckReportFileHandler(config)
 
+  def register(self, dict_obj, componentsMapped=True, commandsInProgress=True):
+    raise NotImplementedError()
+
   def dirType(self, path):
     if not os.path.exists(path):
       return 'not_exist'
@@ -229,21 +232,20 @@ class HostInfoLinux(HostInfo):
         cmd = cmd.replace('\0', ' ')
         if not 'AmbariServer' in cmd:
           if 'java' in cmd:
-            dict = {}
-            dict['pid'] = int(pid)
-            dict['hadoop'] = False
+            metrics = {}
+            metrics['pid'] = int(pid)
+            metrics['hadoop'] = False
             for filter in self.PROC_FILTER:
               if filter in cmd:
-                dict['hadoop'] = True
-            dict['command'] = unicode(cmd.strip(), errors='ignore')
+                metrics['hadoop'] = True
+            metrics['command'] = unicode(cmd.strip(), errors='ignore')
             for line in open(os.path.join('/proc', pid, 'status')):
               if line.startswith('Uid:'):
                 uid = int(line.split()[1])
-                dict['user'] = pwd.getpwuid(uid).pw_name
-            list.append(dict)
+                metrics['user'] = pwd.getpwuid(uid).pw_name
+            list.append(metrics)
     except:
       logger.exception("Checking java processes failed")
-    pass
 
   def getTransparentHugePage(self):
     thp_regex = "\[(.+)\]"
@@ -296,54 +298,54 @@ class HostInfoLinux(HostInfo):
       logger.exception('Unable to get information about JCE')
       return None
 
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+  def register(self, metrics, componentsMapped=True, commandsInProgress=True):
     """ Return various details about the host
     componentsMapped: indicates if any components are mapped to this host
     commandsInProgress: indicates if any commands are in progress
     """
 
-    dict['hostHealth'] = {}
+    metrics['hostHealth'] = {}
 
     java = []
     self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
+    metrics['hostHealth']['activeJavaProcs'] = java
 
     liveSvcs = []
     self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
+    metrics['hostHealth']['liveServices'] = liveSvcs
 
-    dict['umask'] = str(self.getUMask())
+    metrics['umask'] = str(self.getUMask())
 
-    dict['transparentHugePage'] = self.getTransparentHugePage()
-    dict['firewallRunning'] = self.checkFirewall()
-    dict['firewallName'] = self.getFirewallName()
-    dict['reverseLookup'] = self.checkReverseLookup()
-    dict['hasUnlimitedJcePolicy'] = self.checkUnlimitedJce()
+    metrics['transparentHugePage'] = self.getTransparentHugePage()
+    metrics['firewallRunning'] = self.checkFirewall()
+    metrics['firewallName'] = self.getFirewallName()
+    metrics['reverseLookup'] = self.checkReverseLookup()
+    metrics['hasUnlimitedJcePolicy'] = self.checkUnlimitedJce()
     # If commands are in progress or components are already mapped to this host
     # Then do not perform certain expensive host checks
     if componentsMapped or commandsInProgress:
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
+      metrics['alternatives'] = []
+      metrics['stackFoldersAndFiles'] = []
+      metrics['existingUsers'] = []
 
     else:
       etcs = []
       self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
-      dict['alternatives'] = etcs
+      metrics['alternatives'] = etcs
 
       existingUsers = []
       self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
+      metrics['existingUsers'] = existingUsers
 
       dirs = []
       self.checkFolders(self.DEFAULT_BASEDIRS, self.DEFAULT_PROJECT_NAMES, self.EXACT_DIRECTORIES, existingUsers, dirs)
-      dict['stackFoldersAndFiles'] = dirs
+      metrics['stackFoldersAndFiles'] = dirs
 
-      self.reportFileHandler.writeHostCheckFile(dict)
+      self.reportFileHandler.writeHostCheckFile(metrics)
       pass
 
     # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
+    metrics['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
 
     pass
 
@@ -418,43 +420,42 @@ class HostInfoWindows(HostInfo):
     code, out, err = run_powershell_script(self.SERVICE_STATUS_CMD.format(serivce_name))
     return out, err, code
 
-  def register(self, dict, componentsMapped=True, commandsInProgress=True):
+  def register(self, metrics, componentsMapped=True, commandsInProgress=True):
     """ Return various details about the host
     componentsMapped: indicates if any components are mapped to this host
     commandsInProgress: indicates if any commands are in progress
     """
-    dict['hostHealth'] = {}
+    metrics['hostHealth'] = {}
 
     java = []
     self.javaProcs(java)
-    dict['hostHealth']['activeJavaProcs'] = java
+    metrics['hostHealth']['activeJavaProcs'] = java
 
     liveSvcs = []
     self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
-    dict['hostHealth']['liveServices'] = liveSvcs
+    metrics['hostHealth']['liveServices'] = liveSvcs
 
-    dict['umask'] = str(self.getUMask())
+    metrics['umask'] = str(self.getUMask())
 
-    dict['firewallRunning'] = self.checkFirewall()
-    dict['firewallName'] = self.getFirewallName()
-    dict['reverseLookup'] = self.checkReverseLookup()
+    metrics['firewallRunning'] = self.checkFirewall()
+    metrics['firewallName'] = self.getFirewallName()
+    metrics['reverseLookup'] = self.checkReverseLookup()
     # If commands are in progress or components are already mapped to this host
     # Then do not perform certain expensive host checks
     if componentsMapped or commandsInProgress:
-      dict['alternatives'] = []
-      dict['stackFoldersAndFiles'] = []
-      dict['existingUsers'] = []
+      metrics['alternatives'] = []
+      metrics['stackFoldersAndFiles'] = []
+      metrics['existingUsers'] = []
     else:
       existingUsers = []
       self.checkUsers(self.DEFAULT_USERS, existingUsers)
-      dict['existingUsers'] = existingUsers
+      metrics['existingUsers'] = existingUsers
       # TODO check HDP stack and folders here
-      self.reportFileHandler.writeHostCheckFile(dict)
+      self.reportFileHandler.writeHostCheckFile(metrics)
       pass
 
     # The time stamp must be recorded at the end
-    dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
-
+    metrics['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
 
 
 def main(argv=None):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7693095/ambari-agent/src/test/python/ambari_agent/TestHardware.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestHardware.py b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
index 5400e26..e78f8f2 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestHardware.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestHardware.py
@@ -26,14 +26,12 @@ import unittest
 import platform
 import socket
 import subprocess
-import os
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 from ambari_agent import hostname
 from ambari_agent.Hardware import Hardware
 from ambari_agent.AmbariConfig import AmbariConfig
 from ambari_agent.Facter import Facter, FacterLinux
 from ambari_commons import OSCheck
-from resource_management.core import shell
 
 
 @not_for_platform(PLATFORM_WINDOWS)
@@ -61,10 +59,10 @@ class TestHardware(TestCase):
   def test_build(self, get_os_version_mock, get_os_type_mock):
     get_os_type_mock.return_value = "suse"
     get_os_version_mock.return_value = "11"
-    config = None
-    hardware = Hardware(config)
+    hardware = Hardware()
     result = hardware.get()
     osdisks = hardware.osdisks()
+
     for dev_item in result['mounts']:
       self.assertTrue(dev_item['available'] >= 0)
       self.assertTrue(dev_item['used'] >= 0)
@@ -113,7 +111,33 @@ class TestHardware(TestCase):
     chk_writable_mount_mock.side_effect = chk_writable_mount_side_effect
     shell_call_mock.return_value = (0, df_output, '')
 
-    result = Hardware.osdisks()
+    result = Hardware(cache_info=False).osdisks()
+
+    self.assertEquals(1, len(result))
+
+    expected_mounts_left = ["/"]
+    mounts_left = [item["mountpoint"] for item in result]
+
+    self.assertEquals(expected_mounts_left, mounts_left)
+
+  @patch.object(Hardware, "_chk_writable_mount")
+  @patch("ambari_agent.Hardware.path_isfile")
+  @patch("resource_management.core.shell.call")
+  def test_osdisks_no_ignore_property(self, shell_call_mock, isfile_mock, chk_writable_mount_mock):
+    df_output = \
+      """Filesystem                                                                                        Type  1024-blocks     Used Available Capacity Mounted on
+      /dev/mapper/docker-253:0-4980899-d45c264d37ab18c8ed14f890f4d59ac2b81e1c52919eb36a79419787209515f3 xfs      31447040  1282384  30164656       5% /
+      """
+
+    isfile_mock.return_value = False
+    chk_writable_mount_mock.return_value = True
+    shell_call_mock.return_value = (0, df_output, '')
+    config = AmbariConfig()
+
+    # check that the config does not define the ignore_mount_points property
+    self.assertEquals("test", config.get('agent', 'ignore_mount_points', default="test"))
+
+    result = Hardware(config=config, cache_info=False).osdisks()
 
     self.assertEquals(1, len(result))
 
@@ -128,35 +152,35 @@ class TestHardware(TestCase):
   def test_osdisks_remote(self, shell_call_mock, get_os_version_mock, get_os_type_mock):
     get_os_type_mock.return_value = "suse"
     get_os_version_mock.return_value = "11"
-    Hardware.osdisks()
+    Hardware(cache_info=False).osdisks()
     timeout = 10
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     config = AmbariConfig()
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     config.add_section(AmbariConfig.AMBARI_PROPERTIES_CATEGORY)
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "true")
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_KEY, "false")
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT", "-l"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, "0")
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT", "-l"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     timeout = 1
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, str(timeout))
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT", "-l"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
     timeout = 2
     config.set(AmbariConfig.AMBARI_PROPERTIES_CATEGORY, Hardware.CHECK_REMOTE_MOUNTS_TIMEOUT_KEY, str(timeout))
-    Hardware.osdisks(config)
+    Hardware(config=config, cache_info=False).osdisks()
     shell_call_mock.assert_called_with(['timeout', str(timeout), "df", "-kPT", "-l"], stdout = subprocess.PIPE, stderr = subprocess.PIPE, timeout = timeout, quiet = True)
 
   def test_parse_df_line(self):
@@ -182,7 +206,11 @@ class TestHardware(TestCase):
     ]
 
     for sample in samples:
-      result = Hardware._parse_df_line(sample["sample"])
+      try:
+        result = Hardware(cache_info=False)._parse_df([sample["sample"]]).next()
+      except StopIteration:
+        result = None
+
       self.assertEquals(result, sample["expected"], "Failed with sample: '{0}', expected: {1}, got: {2}".format(
         sample["sample"],
         sample["expected"],
@@ -430,7 +458,7 @@ SwapFree:        1598676 kB
     }
     conf.configure_mock(**attr)
 
-    result = Hardware.osdisks(conf)
+    result = Hardware(config=conf, cache_info=False).osdisks()
 
     self.assertEquals(1, len(result))
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7693095/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestRegistration.py b/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
index a5c23fa..fada29c 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestRegistration.py
@@ -19,7 +19,6 @@ limitations under the License.
 '''
 
 from unittest import TestCase
-import os
 import tempfile
 from mock.mock import patch
 from mock.mock import MagicMock
@@ -35,13 +34,14 @@ class TestRegistration(TestCase):
 
   @patch("subprocess.Popen")
   @patch.object(Hardware, "_chk_writable_mount", new = MagicMock(return_value=True))
+  @patch("__builtin__.open", new=MagicMock())
   @patch.object(FacterLinux, "facterInfo", new = MagicMock(return_value={}))
   @patch.object(FacterLinux, "__init__", new = MagicMock(return_value = None))
   @patch("resource_management.core.shell.call")
   @patch.object(OSCheck, "get_os_type")
   @patch.object(OSCheck, "get_os_version")
   def test_registration_build(self, get_os_version_mock, get_os_type_mock, run_os_cmd_mock, Popen_mock):
-    config = AmbariConfig().getConfig()
+    config = AmbariConfig()
     tmpdir = tempfile.gettempdir()
     config.set('agent', 'prefix', tmpdir)
     config.set('agent', 'current_ping_port', '33777')
@@ -58,7 +58,6 @@ class TestRegistration(TestCase):
     self.assertEquals(data['timestamp'] > 1353678475465L, True, "timestamp should not be empty")
     self.assertEquals(len(data['agentEnv']) > 0, True, "agentEnv should not be empty")
     self.assertEquals(data['agentVersion'], reference_version, "agentVersion should not be empty")
-    print data['agentEnv']['umask']
     self.assertEquals(not data['agentEnv']['umask']== "", True, "agents umask should not be empty")
     self.assertEquals(data['currentPingPort'] == 33777, True, "current ping port should be 33777")
     self.assertEquals(data['prefix'], config.get('agent', 'prefix'), 'The prefix path does not match')


[16/16] ambari git commit: Merge branch 'branch-2.6' into branch-feature-AMBARI-22457

Posted by ao...@apache.org.
Merge branch 'branch-2.6' into branch-feature-AMBARI-22457


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/606d876b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/606d876b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/606d876b

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 606d876b62cafe64e51022d722128475643116da
Parents: bc8b391 c6825e9
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Mon Nov 27 14:58:58 2017 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Mon Nov 27 14:58:58 2017 +0200

----------------------------------------------------------------------
 .../src/main/python/ambari_agent/Hardware.py    | 130 +++---
 .../src/main/python/ambari_agent/Heartbeat.py   |   8 +-
 .../src/main/python/ambari_agent/HostInfo.py    |  83 ++--
 .../test/python/ambari_agent/TestHardware.py    |  56 ++-
 .../python/ambari_agent/TestRegistration.py     |   5 +-
 .../core/providers/package/yumrpm.py            |   4 +-
 .../controller/AmbariManagementController.java  |   6 +-
 .../AmbariManagementControllerImpl.java         | 104 ++---
 .../controller/AmbariManagementHelper.java      | 201 ++++++++++
 .../internal/ExtensionLinkResourceProvider.java |  15 +
 .../ambari/server/orm/dao/ExtensionLinkDAO.java |  36 +-
 .../orm/entities/ExtensionLinkEntity.java       |   1 +
 .../ambari/server/stack/ExtensionHelper.java    |  88 ++++-
 .../ambari/server/stack/ExtensionModule.java    |   2 +
 .../ambari/server/stack/StackManager.java       |  81 +++-
 .../apache/ambari/server/stack/StackModule.java |   8 +-
 .../ambari/server/state/ExtensionInfo.java      |  26 +-
 .../apache/ambari/server/state/StackInfo.java   |  27 +-
 .../state/stack/ExtensionMetainfoXml.java       |  11 +
 .../server/upgrade/UpgradeCatalog260.java       | 171 +++++++-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |  12 +-
 .../package/scripts/hive_server_interactive.py  |  20 +-
 .../stacks/HDP/2.5/services/YARN/kerberos.json  |  12 +-
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |  24 +-
 .../stack/StackManagerCommonServicesTest.java   |   4 +-
 .../server/stack/StackManagerExtensionTest.java | 116 ++++--
 .../server/stack/StackManagerMiscTest.java      |  13 +-
 .../ambari/server/stack/StackManagerMock.java   |   5 +-
 .../ambari/server/stack/StackManagerTest.java   |  13 +-
 .../server/upgrade/UpgradeCatalog260Test.java   | 129 +++++-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |   2 +-
 .../HIVE/running_withMOTDmsg_andTrailingMsg.txt |  46 +++
 .../stacks/2.5/HIVE/test_hive_server_int.py     |  21 +
 .../resources/extensions/EXT/0.1/metainfo.xml   |   2 +-
 .../resources/extensions/EXT/0.2/metainfo.xml   |   3 +-
 .../resources/extensions/EXT/0.3/metainfo.xml   |  32 ++
 .../EXT/0.3/services/OOZIE2/metainfo.xml        | 118 ++++++
 .../services/OOZIE2/themes/broken_theme.json    |   3 +
 .../resources/extensions/EXT/0.4/metainfo.xml   |  32 ++
 .../EXT/0.4/services/OOZIE2/metainfo.xml        | 118 ++++++
 .../services/OOZIE2/themes/broken_theme.json    |   3 +
 .../test_kerberos_descriptor_ranger_kms.json    | 286 ++++++++++++++
 .../stacks_with_extensions/HDP/0.3/metainfo.xml |  22 ++
 .../HDP/0.3/repos/repoinfo.xml                  |  63 +++
 .../HDP/0.3/services/HBASE/metainfo.xml         |  26 ++
 .../0.3/services/HDFS/configuration/global.xml  | 145 +++++++
 .../services/HDFS/configuration/hadoop-env.xml  | 223 +++++++++++
 .../services/HDFS/configuration/hbase-site.xml  | 137 +++++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 199 ++++++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 396 +++++++++++++++++++
 .../HDP/0.3/services/HDFS/metainfo.xml          |  30 ++
 .../0.3/services/HDFS/package/dummy-script.py   |  20 +
 .../HDP/0.3/services/HIVE/metainfo.xml          |  26 ++
 .../HDP/0.3/services/MAPREDUCE/metainfo.xml     |  23 ++
 .../HDP/0.3/services/ZOOKEEPER/metainfo.xml     |  26 ++
 .../stacks_with_extensions/HDP/0.4/metainfo.xml |  22 ++
 .../HDP/0.4/repos/repoinfo.xml                  |  63 +++
 .../HDP/0.4/services/HBASE/metainfo.xml         |  26 ++
 .../0.4/services/HDFS/configuration/global.xml  | 145 +++++++
 .../services/HDFS/configuration/hadoop-env.xml  | 223 +++++++++++
 .../services/HDFS/configuration/hbase-site.xml  | 137 +++++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 199 ++++++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 396 +++++++++++++++++++
 .../HDP/0.4/services/HDFS/metainfo.xml          |  30 ++
 .../0.4/services/HDFS/package/dummy-script.py   |  20 +
 .../HDP/0.4/services/HIVE/metainfo.xml          |  26 ++
 .../HDP/0.4/services/MAPREDUCE/metainfo.xml     |  23 ++
 .../HDP/0.4/services/ZOOKEEPER/metainfo.xml     |  26 ++
 .../main/admin/kerberos/step4_controller.js     |   6 +
 .../app/controllers/wizard/step3_controller.js  |   2 +-
 ambari-web/app/messages.js                      |   5 +-
 ambari-web/app/mixins/common/serverValidator.js |  41 +-
 ambari-web/app/styles/application.less          |   3 +-
 .../config_recommendation_popup.hbs             | 102 +++--
 .../config_validation_popup.js                  |   7 +-
 .../test/mixins/common/serverValidator_test.js  |  15 +-
 .../apache/ambari/view/utils/hdfs/HdfsApi.java  |  16 +-
 .../apache/ambari/view/utils/hdfs/HdfsUtil.java |  17 +-
 .../ui/app/domain/workflow-importer.js          |   3 +-
 79 files changed, 4584 insertions(+), 382 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/606d876b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/606d876b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------


[07/16] ambari git commit: AMBARI-22502.Workflow Manager View - FS node will overwrite internal commands and replace them with blank "move" commands when reopening the node(Venkata Sairam)

Posted by ao...@apache.org.
AMBARI-22502.Workflow Manager View - FS node will overwrite internal commands and replace them with blank "move" commands when reopening the node(Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/099e0185
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/099e0185
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/099e0185

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 099e0185dc7d9b8d14f267dce0c113f819275ded
Parents: 677e27e
Author: Venkata Sairam <ve...@gmail.com>
Authored: Thu Nov 23 15:36:57 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Thu Nov 23 15:40:57 2017 +0530

----------------------------------------------------------------------
 .../src/main/resources/ui/app/domain/workflow-importer.js         | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/099e0185/contrib/views/wfmanager/src/main/resources/ui/app/domain/workflow-importer.js
----------------------------------------------------------------------
diff --git a/contrib/views/wfmanager/src/main/resources/ui/app/domain/workflow-importer.js b/contrib/views/wfmanager/src/main/resources/ui/app/domain/workflow-importer.js
index 84a789d..2afc304 100644
--- a/contrib/views/wfmanager/src/main/resources/ui/app/domain/workflow-importer.js
+++ b/contrib/views/wfmanager/src/main/resources/ui/app/domain/workflow-importer.js
@@ -112,7 +112,8 @@ var WorkflowImporter= Ember.Object.extend({
       if (nodeHandler){
         if (Ember.isArray(workflowAppJson[key])){
           workflowAppJson[key].forEach(function(jsonObj){
-            var node=nodeHandler.handleImportNode(key,jsonObj,workflow,xmlDoc);
+            var actionDom = xmlDoc.find("action[name='" + jsonObj._name + "']");
+            var node = nodeHandler.handleImportNode(key,jsonObj,workflow,actionDom);
             nodeMap.set(jsonObj._name,{json:jsonObj,node:node});
           });
         }else{


[04/16] ambari git commit: AMBARI-22472. Ambari Upgrade 2.5 -> 2.6 : Update NodeManager's HSI identity 'llap_zk_hive' and 'llap_task_hive' to use '/HIVE/HIVE_SERVER/hive_server_hive' reference instead of creating the same identity again.

Posted by ao...@apache.org.
AMBARI-22472. Ambari Upgrade 2.5 -> 2.6: Update NodeManager's HSI identities 'llap_zk_hive' and 'llap_task_hive' to use the '/HIVE/HIVE_SERVER/hive_server_hive' reference instead of creating the same identity again.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7f5b7d7c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7f5b7d7c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7f5b7d7c

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 7f5b7d7cef41162686e4ff9e3b620de990a6bd3f
Parents: 92e362b
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Mon Nov 20 12:26:48 2017 -0800
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Wed Nov 22 10:10:49 2017 -0800

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog260.java       | 141 +++++++++
 .../stacks/HDP/2.5/services/YARN/kerberos.json  |  12 +-
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |  24 +-
 .../server/upgrade/UpgradeCatalog260Test.java   | 129 ++++++++-
 .../test_kerberos_descriptor_ranger_kms.json    | 286 +++++++++++++++++++
 5 files changed, 556 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7f5b7d7c/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
index 25635b6..5831565 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
@@ -45,7 +45,9 @@ import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosKeytabDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosPrincipalType;
 import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
@@ -135,6 +137,19 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
   private static final String CORE_SITE = "core-site";
   public static final String AMS_SSL_CLIENT = "ams-ssl-client";
   public static final String METRIC_TRUSTSTORE_ALIAS = "ssl.client.truststore.alias";
+
+  private static final String HIVE_INTERACTIVE_SITE = "hive-interactive-site";
+  public static final String HIVE_LLAP_DAEMON_KEYTAB_FILE = "hive.llap.daemon.keytab.file";
+  public static final String HIVE_LLAP_ZK_SM_KEYTAB_FILE = "hive.llap.zk.sm.keytab.file";
+  public static final String HIVE_LLAP_TASK_KEYTAB_FILE = "hive.llap.task.keytab.file";
+  public static final String HIVE_SERVER_KERBEROS_PREFIX = "/HIVE/HIVE_SERVER/";
+  public static final String YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY = "llap_zk_hive";
+  public static final String YARN_LLAP_TASK_HIVE_KERBEROS_IDENTITY = "llap_task_hive";
+  public static final String HIVE_SERVER_HIVE_KERBEROS_IDENTITY = "hive_server_hive";
+
+  // Used to track whether YARN -> NODEMANAGER -> 'llap_zk_hive' kerberos descriptor was updated or not.
+  private List<String> yarnKerberosDescUpdatedList = new ArrayList<>();
+
   /**
    * Logger.
    */
@@ -497,6 +512,7 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
     ensureZeppelinProxyUserConfigs();
     updateKerberosDescriptorArtifacts();
     updateAmsConfigs();
+    updateHiveConfigs();
     updateHDFSWidgetDefinition();
     updateExistingRepositoriesToBeResolved();
   }
@@ -636,6 +652,7 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
         if (kerberosDescriptor != null) {
           fixRangerKMSKerberosDescriptor(kerberosDescriptor);
           fixIdentityReferences(getCluster(artifactEntity), kerberosDescriptor);
+          fixYarnHsiKerberosDescriptorAndSiteConfig(getCluster(artifactEntity), kerberosDescriptor);
 
           artifactEntity.setArtifactData(kerberosDescriptor.toMap());
           artifactDAO.merge(artifactEntity);
@@ -662,6 +679,130 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
     }
   }
 
+  /**
+   * Updates YARN's NODEMANAGER 'llap_zk_hive' and 'llap_task_hive' kerberos descriptors to be references to
+   * /HIVE/HIVE_SERVER/hive_server_hive and records which hive-interactive-site keytab configs need to be updated.
+   */
+  protected void fixYarnHsiKerberosDescriptorAndSiteConfig(Cluster cluster, KerberosDescriptor kerberosDescriptor) {
+    LOG.info("Updating YARN's HSI Kerberos Descriptor ....");
+
+    // Step 1. Get Hive -> HIVE_SERVER -> 'hive_server_hive' kerberos description for referencing later
+    KerberosServiceDescriptor hiveServiceDescriptor = kerberosDescriptor.getService("HIVE");
+    KerberosIdentityDescriptor hsh_identityDescriptor = null;
+    KerberosPrincipalDescriptor hsh_principalDescriptor = null;
+    KerberosKeytabDescriptor hsh_keytabDescriptor = null;
+    if (hiveServiceDescriptor != null) {
+      KerberosComponentDescriptor hiveServerKerberosDescriptor = hiveServiceDescriptor.getComponent("HIVE_SERVER");
+      if (hiveServerKerberosDescriptor != null) {
+        hsh_identityDescriptor = hiveServerKerberosDescriptor.getIdentity(HIVE_SERVER_HIVE_KERBEROS_IDENTITY);
+        if (hsh_identityDescriptor != null) {
+          LOG.info("  Retrieved HIVE->HIVE_SERVER kerberos descriptor. Name = " + hsh_identityDescriptor.getName());
+          hsh_principalDescriptor = hsh_identityDescriptor.getPrincipalDescriptor();
+          hsh_keytabDescriptor = hsh_identityDescriptor.getKeytabDescriptor();
+        }
+      }
+
+      // Step 2. Update YARN -> NODEMANAGER's : (1). 'llap_zk_hive' and (2). 'llap_task_hive' kerberos descriptor as reference to
+      // HIVE -> HIVE_SERVER -> 'hive_server_hive' (Same as YARN -> NODEMANAGER -> 'yarn_nodemanager_hive_server_hive')
+      if (hsh_principalDescriptor != null && hsh_keytabDescriptor != null) {
+        KerberosServiceDescriptor yarnServiceDescriptor = kerberosDescriptor.getService("YARN");
+        if (yarnServiceDescriptor != null) {
+          KerberosComponentDescriptor yarnNmKerberosDescriptor = yarnServiceDescriptor.getComponent("NODEMANAGER");
+          if (yarnNmKerberosDescriptor != null) {
+            String[] identities = {YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY, YARN_LLAP_TASK_HIVE_KERBEROS_IDENTITY};
+            for (String identity : identities) {
+              KerberosIdentityDescriptor identityDescriptor = yarnNmKerberosDescriptor.getIdentity(identity);
+
+              KerberosPrincipalDescriptor principalDescriptor = null;
+              KerberosKeytabDescriptor keytabDescriptor = null;
+              if (identityDescriptor != null) {
+                LOG.info("  Retrieved YARN->NODEMANAGER kerberos descriptor to be updated. Name = " + identityDescriptor.getName());
+                principalDescriptor = identityDescriptor.getPrincipalDescriptor();
+                keytabDescriptor = identityDescriptor.getKeytabDescriptor();
+
+                identityDescriptor.setReference(HIVE_SERVER_KERBEROS_PREFIX + hsh_identityDescriptor.getName());
+                LOG.info("    Updated '" + YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY + "' identity descriptor reference = '"
+                        + identityDescriptor.getReference() + "'");
+                principalDescriptor.setValue(null);
+                LOG.info("    Updated '" + YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY + "' principal descriptor value = '"
+                        + principalDescriptor.getValue() + "'");
+
+                // Updating keytabs now
+                keytabDescriptor.setFile(null);
+                LOG.info("    Updated '" + YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY + "' keytab descriptor file = '"
+                        + keytabDescriptor.getFile() + "'");
+                keytabDescriptor.setOwnerName(null);
+                LOG.info("    Updated '" + YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY + "' keytab descriptor owner name = '" + keytabDescriptor.getOwnerName() + "'");
+                keytabDescriptor.setOwnerAccess(null);
+                LOG.info("    Updated '" + YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY + "' keytab descriptor owner access = '" + keytabDescriptor.getOwnerAccess() + "'");
+                keytabDescriptor.setGroupName(null);
+                LOG.info("    Updated '" + YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY + "' keytab descriptor group name = '" + keytabDescriptor.getGroupName() + "'");
+                keytabDescriptor.setGroupAccess(null);
+                LOG.info("    Updated '" + YARN_LLAP_ZK_HIVE_KERBEROS_IDENTITY + "' keytab descriptor group access = '" + keytabDescriptor.getGroupAccess() + "'");
+
+                // Need this as trigger to update the HIVE_LLAP_ZK_SM_KEYTAB_FILE configs later.
+
+                // Get the keytab file 'config name'.
+                String[] splits = keytabDescriptor.getConfiguration().split("/");
+                if (splits != null && splits.length == 2) {
+                  updateYarnKerberosDescUpdatedList(splits[1]);
+                  LOG.info("    Updated 'yarnKerberosDescUpdatedList' = " + getYarnKerberosDescUpdatedList());
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  public void updateYarnKerberosDescUpdatedList(String val) {
+    yarnKerberosDescUpdatedList.add(val);
+  }
+
+  public List<String> getYarnKerberosDescUpdatedList() {
+    return yarnKerberosDescUpdatedList;
+  }
+
+  protected void updateHiveConfigs() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+    Clusters clusters = ambariManagementController.getClusters();
+    if (clusters != null) {
+      Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+      if (clusterMap != null && !clusterMap.isEmpty()) {
+        for (final Cluster cluster : clusterMap.values()) {
+          // Updating YARN->NodeManager kerberos descriptors (1) 'llap_zk_hive' and (2) 'llap_task_hive' and their associated configs
+          // hive-interactive-site/hive.llap.zk.sm.keytab.file and hive-interactive-site/hive.llap.task.keytab.file respectively,
+          // based on what hive-interactive-site/hive.llap.daemon.keytab.file has.
+          Config hsiSiteConfig = cluster.getDesiredConfigByType(HIVE_INTERACTIVE_SITE);
+          Map<String, String> hsiSiteConfigProperties = hsiSiteConfig.getProperties();
+          if (hsiSiteConfigProperties != null &&
+                  hsiSiteConfigProperties.containsKey(HIVE_LLAP_DAEMON_KEYTAB_FILE)) {
+            String[] identities = {HIVE_LLAP_ZK_SM_KEYTAB_FILE, HIVE_LLAP_TASK_KEYTAB_FILE};
+            Map<String, String> newProperties = new HashMap<>();
+            for (String identity : identities) {
+              // Update only if we were able to modify the corresponding kerberos descriptor,
+              // reflected in list 'getYarnKerberosDescUpdatedList'.
+              if (getYarnKerberosDescUpdatedList().contains(identity) && hsiSiteConfigProperties.containsKey(identity)) {
+                newProperties.put(identity, hsiSiteConfigProperties.get(HIVE_LLAP_DAEMON_KEYTAB_FILE));
+              }
+            }
+
+            // Update step.
+            if (newProperties.size() > 0) {
+              try {
+                updateConfigurationPropertiesForCluster(cluster, HIVE_INTERACTIVE_SITE, newProperties, true, false);
+                LOG.info("Updated HSI config(s) : " + newProperties.keySet() + " with value(s) = " + newProperties.values()+" respectively.");
+              } catch (AmbariException e) {
+                e.printStackTrace();
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
   protected void updateAmsConfigs() throws AmbariException {
     AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
     Clusters clusters = ambariManagementController.getClusters();
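
The net effect of updateHiveConfigs() can be illustrated with a small standalone sketch (plain Java maps instead of Ambari's Config and DBAccessor machinery; the property names are the ones used in the patch): only the identities whose descriptors were actually converted to references get their keytab properties re-pointed at hive.llap.daemon.keytab.file.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HsiKeytabSync {
  public static void main(String[] args) {
    Map<String, String> hsiSite = new HashMap<>();
    hsiSite.put("hive.llap.daemon.keytab.file", "/etc/security/keytabs/hive.service.keytab");
    hsiSite.put("hive.llap.zk.sm.keytab.file", "/etc/security/keytabs/hive.llap.zk.sm.keytab");
    hsiSite.put("hive.llap.task.keytab.file", "/etc/security/keytabs/hive.llap.task.keytab");

    // Keytab config names whose kerberos descriptors were turned into references.
    List<String> updatedDescriptors = new ArrayList<>();
    updatedDescriptors.add("hive.llap.zk.sm.keytab.file");
    updatedDescriptors.add("hive.llap.task.keytab.file");

    Map<String, String> newProps = new HashMap<>();
    for (String key : updatedDescriptors) {
      if (hsiSite.containsKey(key)) {
        // Re-point the property at whatever hive.llap.daemon.keytab.file holds.
        newProps.put(key, hsiSite.get("hive.llap.daemon.keytab.file"));
      }
    }
    System.out.println(newProps); // both entries now point at hive.service.keytab
  }
}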

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f5b7d7c/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
index af6bda6..a94aa0e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.5/services/YARN/kerberos.json
@@ -102,21 +102,11 @@
             },
             {
               "name": "llap_zk_hive",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
               "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
               },
               "keytab": {
-                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },
               "when" : {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f5b7d7c/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index e0417bf..bd6798c 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -107,21 +107,11 @@
             },
             {
               "name": "llap_task_hive",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
               "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
                 "configuration": "hive-interactive-site/hive.llap.task.principal"
               },
               "keytab": {
-                "file": "${keytab_dir}/hive.llap.task.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
                 "configuration": "hive-interactive-site/hive.llap.task.keytab.file"
               },
               "when" : {
@@ -130,21 +120,11 @@
             },
             {
               "name": "llap_zk_hive",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
               "principal": {
-                "value": "hive/_HOST@${realm}",
-                "type" : "service",
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
               },
               "keytab": {
-                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
-                "owner": {
-                  "name": "${yarn-env/yarn_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": "r"
-                },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },
               "when" : {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f5b7d7c/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
index be04cd5..c611171 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog260Test.java
@@ -74,6 +74,9 @@ import org.apache.ambari.server.state.kerberos.KerberosComponentDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptor;
 import org.apache.ambari.server.state.kerberos.KerberosDescriptorFactory;
 import org.apache.ambari.server.state.kerberos.KerberosIdentityDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosKeytabDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosPrincipalDescriptor;
+import org.apache.ambari.server.state.kerberos.KerberosPrincipalType;
 import org.apache.ambari.server.state.kerberos.KerberosServiceDescriptor;
 import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.commons.io.FileUtils;
@@ -647,7 +650,7 @@ public class UpgradeCatalog260Test {
     expect(artifactEntity.getArtifactData()).andReturn(kerberosDescriptor.toMap()).once();
 
     Capture<Map<String, Object>> captureMap = newCapture();
-    expect(artifactEntity.getForeignKeys()).andReturn(Collections.singletonMap("cluster", "2"));
+    expect(artifactEntity.getForeignKeys()).andReturn(Collections.singletonMap("cluster", "2")).times(2);
     artifactEntity.setArtifactData(capture(captureMap));
     expectLastCall().once();
 
@@ -664,11 +667,26 @@ public class UpgradeCatalog260Test {
     expect(config.getTag()).andReturn("version1").anyTimes();
     expect(config.getType()).andReturn("ranger-kms-audit").anyTimes();
 
+    Map<String, String> hsiProperties = new HashMap<>();
+    hsiProperties.put("hive.llap.daemon.keytab.file", "/etc/security/keytabs/hive.service.keytab");
+    hsiProperties.put("hive.llap.zk.sm.keytab.file", "/etc/security/keytabs/hive.llap.zk.sm.keytab");
+
+    Config hsiConfig = createMock(Config.class);
+    expect(hsiConfig.getProperties()).andReturn(hsiProperties).anyTimes();
+    expect(hsiConfig.getPropertiesAttributes()).andReturn(Collections.<String, Map<String, String>>emptyMap()).anyTimes();
+    expect(hsiConfig.getTag()).andReturn("version1").anyTimes();
+    expect(hsiConfig.getType()).andReturn("hive-interactive-site").anyTimes();
+
     Config newConfig = createMock(Config.class);
     expect(newConfig.getTag()).andReturn("version2").anyTimes();
     expect(newConfig.getType()).andReturn("ranger-kms-audit").anyTimes();
 
+    Config newHsiConfig = createMock(Config.class);
+    expect(newHsiConfig.getTag()).andReturn("version2").anyTimes();
+    expect(newHsiConfig.getType()).andReturn("hive-interactive-site").anyTimes();
+
     ServiceConfigVersionResponse response = createMock(ServiceConfigVersionResponse.class);
+    ServiceConfigVersionResponse response1 = createMock(ServiceConfigVersionResponse.class);
 
     StackId stackId = createMock(StackId.class);
 
@@ -682,6 +700,12 @@ public class UpgradeCatalog260Test {
     expect(cluster.getConfig(eq("ranger-kms-audit"), anyString())).andReturn(newConfig).once();
     expect(cluster.addDesiredConfig("ambari-upgrade", Collections.singleton(newConfig))).andReturn(response).once();
 
+    //HIVE
+    expect(cluster.getDesiredConfigByType("hive-site")).andReturn(hsiConfig).anyTimes();
+    expect(cluster.getDesiredConfigByType("hive-interactive-site")).andReturn(hsiConfig).anyTimes();
+    expect(cluster.getConfigsByType("hive-interactive-site")).andReturn(Collections.singletonMap("version1", hsiConfig)).anyTimes();
+    expect(cluster.getServiceByConfigType("hive-interactive-site")).andReturn("HIVE").anyTimes();
+
     final Clusters clusters = injector.getInstance(Clusters.class);
     expect(clusters.getCluster(2L)).andReturn(cluster).anyTimes();
 
@@ -692,11 +716,11 @@ public class UpgradeCatalog260Test {
         .andReturn(null)
         .once();
 
-    replay(artifactDAO, artifactEntity, cluster, clusters, config, newConfig, response, controller);
+    replay(artifactDAO, artifactEntity, cluster, clusters, config, newConfig, hsiConfig, newHsiConfig, response, response1, controller);
 
     UpgradeCatalog260 upgradeCatalog260 = injector.getInstance(UpgradeCatalog260.class);
     upgradeCatalog260.updateKerberosDescriptorArtifact(artifactDAO, artifactEntity);
-    verify(artifactDAO, artifactEntity, cluster, clusters, config, newConfig, response, controller);
+    verify(artifactDAO, artifactEntity, cluster, clusters, config, newConfig, hsiConfig, newHsiConfig, response, response1, controller);
 
     KerberosDescriptor kerberosDescriptorUpdated = new KerberosDescriptorFactory().createInstance(captureMap.getValue());
     Assert.assertNotNull(kerberosDescriptorUpdated);
@@ -721,6 +745,39 @@ public class UpgradeCatalog260Test {
     Assert.assertTrue(captureProperties.hasCaptured());
     Map<String, String> newProperties = captureProperties.getValue();
     Assert.assertEquals("correct_value@EXAMPLE.COM", newProperties.get("xasecure.audit.jaas.Client.option.principal"));
+
+    // YARN's NodeManager identities (1). 'llap_zk_hive' and (2). 'llap_task_hive' checks after modifications.
+    Map<String, List<String>> identitiesMap = new HashMap<>();
+    identitiesMap.put("llap_zk_hive", new ArrayList<String>() {{
+      add("hive-interactive-site/hive.llap.zk.sm.keytab.file");
+      add("hive-interactive-site/hive.llap.zk.sm.principal");
+    }});
+    identitiesMap.put("llap_task_hive", new ArrayList<String>() {{
+      add("hive-interactive-site/hive.llap.task.keytab.file");
+      add("hive-interactive-site/hive.llap.task.principal");
+    }});
+    for (String llapIdentity : identitiesMap.keySet()) {
+      KerberosIdentityDescriptor yarnKerberosIdentityDescriptor = kerberosDescriptorUpdated.getService("YARN").getComponent("NODEMANAGER").getIdentity(llapIdentity);
+      Assert.assertNotNull(yarnKerberosIdentityDescriptor);
+      Assert.assertEquals("/HIVE/HIVE_SERVER/hive_server_hive", yarnKerberosIdentityDescriptor.getReference());
+
+      KerberosKeytabDescriptor yarnKerberosKeytabDescriptor = yarnKerberosIdentityDescriptor.getKeytabDescriptor();
+      Assert.assertNotNull(yarnKerberosKeytabDescriptor);
+
+      Assert.assertEquals(null, yarnKerberosKeytabDescriptor.getGroupAccess());
+      Assert.assertEquals(null, yarnKerberosKeytabDescriptor.getGroupName());
+      Assert.assertEquals(null, yarnKerberosKeytabDescriptor.getOwnerAccess());
+      Assert.assertEquals(null, yarnKerberosKeytabDescriptor.getOwnerName());
+      Assert.assertEquals(null, yarnKerberosKeytabDescriptor.getFile());
+      Assert.assertEquals(identitiesMap.get(llapIdentity).get(0), yarnKerberosKeytabDescriptor.getConfiguration());
+
+      KerberosPrincipalDescriptor yarnKerberosPrincipalDescriptor = yarnKerberosIdentityDescriptor.getPrincipalDescriptor();
+      Assert.assertNotNull(yarnKerberosPrincipalDescriptor);
+      Assert.assertEquals(null, yarnKerberosPrincipalDescriptor.getName());
+      Assert.assertEquals(KerberosPrincipalType.SERVICE, yarnKerberosPrincipalDescriptor.getType());
+      Assert.assertEquals(null, yarnKerberosPrincipalDescriptor.getValue());
+      Assert.assertEquals(identitiesMap.get(llapIdentity).get(1), yarnKerberosPrincipalDescriptor.getConfiguration());
+    }
   }
 
   @Test
@@ -780,6 +837,72 @@ public class UpgradeCatalog260Test {
   }
 
   @Test
+  public void testUpdateHiveConfigs() throws Exception {
+
+    Map<String, String> oldProperties = new HashMap<String, String>() {
+      {
+        put("hive.llap.zk.sm.keytab.file", "/etc/security/keytabs/hive.llap.zk.sm.keytab");
+        put("hive.llap.daemon.keytab.file", "/etc/security/keytabs/hive.service.keytab");
+        put("hive.llap.task.keytab.file", "/etc/security/keytabs/hive.llap.task.keytab");
+      }
+    };
+    Map<String, String> newProperties = new HashMap<String, String>() {
+      {
+        put("hive.llap.zk.sm.keytab.file", "/etc/security/keytabs/hive.service.keytab");
+        put("hive.llap.daemon.keytab.file", "/etc/security/keytabs/hive.service.keytab");
+        put("hive.llap.task.keytab.file", "/etc/security/keytabs/hive.service.keytab");
+      }
+    };
+
+    EasyMockSupport easyMockSupport = new EasyMockSupport();
+
+    Clusters clusters = easyMockSupport.createNiceMock(Clusters.class);
+    final Cluster cluster = easyMockSupport.createNiceMock(Cluster.class);
+    Config mockHsiConfigs = easyMockSupport.createNiceMock(Config.class);
+
+    expect(clusters.getClusters()).andReturn(new HashMap<String, Cluster>() {{
+      put("normal", cluster);
+    }}).once();
+    expect(cluster.getDesiredConfigByType("hive-interactive-site")).andReturn(mockHsiConfigs).atLeastOnce();
+    expect(mockHsiConfigs.getProperties()).andReturn(oldProperties).anyTimes();
+
+    Injector injector = easyMockSupport.createNiceMock(Injector.class);
+    expect(injector.getInstance(Gson.class)).andReturn(null).anyTimes();
+    expect(injector.getInstance(MaintenanceStateHelper.class)).andReturn(null).anyTimes();
+
+    replay(injector, clusters, mockHsiConfigs, cluster);
+
+    AmbariManagementControllerImpl controller = createMockBuilder(AmbariManagementControllerImpl.class)
+            .addMockedMethod("createConfiguration")
+            .addMockedMethod("getClusters", new Class[] { })
+            .addMockedMethod("createConfig")
+            .withConstructor(createNiceMock(ActionManager.class), clusters, injector)
+            .createNiceMock();
+
+    Injector injector2 = easyMockSupport.createNiceMock(Injector.class);
+    Capture<Map> propertiesCapture = EasyMock.newCapture();
+
+    expect(injector2.getInstance(AmbariManagementController.class)).andReturn(controller).anyTimes();
+    expect(controller.getClusters()).andReturn(clusters).anyTimes();
+    expect(controller.createConfig(anyObject(Cluster.class), anyObject(StackId.class), anyString(), capture(propertiesCapture), anyString(),
+            anyObject(Map.class))).andReturn(createNiceMock(Config.class)).once();
+    replay(controller, injector2);
+
+    // This tests the update of HSI config 'hive.llap.daemon.keytab.file'.
+    UpgradeCatalog260  upgradeCatalog260 = new UpgradeCatalog260(injector2);
+    // Register the keytab config names in 'yarnKerberosDescUpdatedList', implying the corresponding kerberos descriptors were updated.
+    upgradeCatalog260.updateYarnKerberosDescUpdatedList("hive.llap.zk.sm.keytab.file");
+    upgradeCatalog260.updateYarnKerberosDescUpdatedList("hive.llap.task.keytab.file");
+
+    upgradeCatalog260.updateHiveConfigs();
+
+    easyMockSupport.verifyAll();
+
+    Map<String, String> updatedProperties = propertiesCapture.getValue();
+    assertTrue(Maps.difference(newProperties, updatedProperties).areEqual());
+  }
+
+  @Test
   public void testHDFSWidgetUpdate() throws Exception {
     final Clusters clusters = createNiceMock(Clusters.class);
     final Cluster cluster = createNiceMock(Cluster.class);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7f5b7d7c/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_ranger_kms.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_ranger_kms.json b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_ranger_kms.json
index e17e121..8c27a9a 100644
--- a/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_ranger_kms.json
+++ b/ambari-server/src/test/resources/kerberos/test_kerberos_descriptor_ranger_kms.json
@@ -104,6 +104,292 @@
           ]
         }
       ]
+    },
+    {
+      "name": "YARN",
+      "identities": [
+        {
+          "name": "yarn_spnego",
+          "reference": "/spnego"
+        },
+        {
+          "name": "yarn_smokeuser",
+          "reference": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "yarn-site": {
+            "yarn.timeline-service.enabled": "true",
+            "yarn.timeline-service.http-authentication.type": "kerberos",
+            "yarn.acl.enable": "true",
+            "yarn.admin.acl": "${yarn-env/yarn_user},dr.who",
+            "yarn.timeline-service.http-authentication.signature.secret": "",
+            "yarn.timeline-service.http-authentication.signature.secret.file": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider": "",
+            "yarn.timeline-service.http-authentication.signer.secret.provider.object": "",
+            "yarn.timeline-service.http-authentication.token.validity": "",
+            "yarn.timeline-service.http-authentication.cookie.domain": "",
+            "yarn.timeline-service.http-authentication.cookie.path": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.hosts": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.users": "",
+            "yarn.timeline-service.http-authentication.proxyuser.*.groups": "",
+            "yarn.timeline-service.http-authentication.kerberos.name.rules": "",
+            "yarn.resourcemanager.proxyuser.*.groups": "",
+            "yarn.resourcemanager.proxyuser.*.hosts": "",
+            "yarn.resourcemanager.proxyuser.*.users": "",
+            "yarn.resourcemanager.proxy-user-privileges.enabled": "true",
+            "yarn.resourcemanager.zk-acl" : "sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()}:rwcda",
+            "hadoop.registry.secure" : "true",
+            "hadoop.registry.system.accounts" : "sasl:${principals/YARN/APP_TIMELINE_SERVER/app_timeline_server_yarn|principalPrimary()},sasl:${principals/MAPREDUCE2/HISTORYSERVER/history_server_jhs|principalPrimary()},sasl:${principals/HDFS/NAMENODE/hdfs|principalPrimary()},sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()},sasl:${principals/HIVE/HIVE_SERVER/hive_server_hive|principalPrimary()}",
+            "hadoop.registry.client.auth" : "kerberos",
+            "hadoop.registry.jaas.context" : "Client"
+          }
+        },
+        {
+          "core-site": {
+            "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*",
+            "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/rm_host}"
+          }
+        },
+        {
+          "capacity-scheduler": {
+            "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}",
+            "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user}"
+          }
+        },
+        {
+          "ranger-yarn-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "NODEMANAGER",
+          "identities": [
+            {
+              "name": "nodemanager_nm",
+              "principal": {
+                "value": "nm/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "yarn-site/yarn.nodemanager.principal",
+                "local_username": "${yarn-env/yarn_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/nm.service.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "yarn-site/yarn.nodemanager.keytab"
+              }
+            },
+            {
+              "name": "yarn_nodemanager_hive_server_hive",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.service.principal"
+              },
+              "keytab": {
+                "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_task_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.task.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.task.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.task.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "llap_zk_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.principal"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.llap.zk.sm.keytab",
+                "owner": {
+                  "name": "${yarn-env/yarn_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
+              },
+              "when" : {
+                "contains" : ["services", "HIVE"]
+              }
+            },
+            {
+              "name": "yarn_nodemanager_spnego",
+              "reference": "/spnego",
+              "principal": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-principal"
+              },
+              "keytab": {
+                "configuration": "yarn-site/yarn.nodemanager.webapp.spnego-keytab-file"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "yarn-site": {
+                "yarn.nodemanager.container-executor.class": "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor"
+              }
+            }
+          ]
+        }
+      ]
+    },
+    {
+      "name": "HIVE",
+      "identities": [
+        {
+          "name": "hive_spnego",
+          "reference": "/spnego"
+        },
+        {
+          "name": "hive_smokeuser",
+          "reference": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hive-site": {
+            "hive.metastore.sasl.enabled": "true",
+            "hive.server2.authentication": "KERBEROS"
+          }
+        },
+        {
+          "ranger-hive-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HIVE_SERVER",
+          "identities": [
+            {
+              "name": "hive_hive_server_hdfs",
+              "reference": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hive_server_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type": "service",
+                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": "r"
+                },
+                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "atlas_kafka",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+              },
+              "keytab": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            },
+            {
+              "name": "hive_hive_server_spnego",
+              "reference": "/spnego",
+              "principal": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
+              }
+            },
+            {
+              "name": "ranger_audit",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER_INTERACTIVE",
+          "identities": [
+            {
+              "name": "hive_hive_server_interactive_hdfs",
+              "reference": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hive_hive_server_interactive_hive_server_hive",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive"
+            },
+            {
+              "name": "hive_hive_server_interactive_spnego",
+              "reference": "/HIVE/HIVE_SERVER/spnego"
+            },
+            {
+              "name": "hive_hive_server_interactive_llap_zk_hive",
+              "reference": "/YARN/NODEMANAGER/llap_zk_hive"
+            }
+          ]
+        }
+      ]
     }
   ]
 }
\ No newline at end of file
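
To summarize the stack kerberos.json changes in this commit: 'llap_zk_hive' and 'llap_task_hive' keep only the configuration properties they populate and point at the shared identity via "reference", so the principal and keytab values are defined once on /HIVE/HIVE_SERVER/hive_server_hive. A rough standalone sketch of how such a reference resolves (plain Java maps, not Ambari's KerberosDescriptor classes; the values are copied from the descriptors above):

import java.util.HashMap;
import java.util.Map;

public class IdentityReferenceResolution {
  public static void main(String[] args) {
    // The shared identity, defined once.
    Map<String, String> hiveServerHive = new HashMap<>();
    hiveServerHive.put("principal", "hive/_HOST@${realm}");
    hiveServerHive.put("keytab", "${keytab_dir}/hive.service.keytab");

    Map<String, Map<String, String>> identities = new HashMap<>();
    identities.put("/HIVE/HIVE_SERVER/hive_server_hive", hiveServerHive);

    // llap_zk_hive no longer carries its own principal/keytab values; it only
    // names the reference plus the config properties the resolved values land in.
    Map<String, String> llapZkHive = new HashMap<>();
    llapZkHive.put("reference", "/HIVE/HIVE_SERVER/hive_server_hive");
    llapZkHive.put("principal.configuration", "hive-interactive-site/hive.llap.zk.sm.principal");
    llapZkHive.put("keytab.configuration", "hive-interactive-site/hive.llap.zk.sm.keytab.file");

    Map<String, String> resolved = identities.get(llapZkHive.get("reference"));
    System.out.println(llapZkHive.get("keytab.configuration") + " <- " + resolved.get("keytab"));
    System.out.println(llapZkHive.get("principal.configuration") + " <- " + resolved.get("principal"));
  }
}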


[05/16] ambari git commit: AMBARI-22501. Stack advisor error while adding Druid service (alexantonenko)

Posted by ao...@apache.org.
AMBARI-22501. Stack advisor error while adding Druid service (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/416570db
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/416570db
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/416570db

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 416570dbd947c5bd366abf8aebaa9b44ac9cfecf
Parents: 7f5b7d7
Author: Alex Antonenko <aa...@hortonworks.com>
Authored: Wed Nov 22 21:20:46 2017 +0300
Committer: Alex Antonenko <aa...@hortonworks.com>
Committed: Wed Nov 22 21:20:46 2017 +0300

----------------------------------------------------------------------
 .../app/controllers/main/admin/kerberos/step4_controller.js    | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/416570db/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js b/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js
index f23814c..ba66d87 100644
--- a/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js
+++ b/ambari-web/app/controllers/main/admin/kerberos/step4_controller.js
@@ -502,6 +502,12 @@ App.KerberosWizardStep4Controller = App.WizardStep7Controller.extend(App.AddSecu
       return p;
     }, {});
 
+    if (this.get('isWithinAddService')) {
+      this.get('content.masterComponentHosts').filterProperty('isInstalled', false).forEach(function(item) {
+        var hostGroupName = blueprintUtils.getHostGroupByFqdn(recommendations, item.hostName);
+        blueprintUtils.addComponentToHostGroup(recommendations, item.component, hostGroupName);
+      }, this);
+    }
     return recommendations;
   },
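
The added block ensures that every master component assigned during Add Service (but not installed yet) appears in its blueprint host group before the recommendations are sent to the stack advisor, which is what the Druid flow was missing. A rough sketch of the same bookkeeping in plain Java (simple maps instead of ambari-web's blueprintUtils; the host and component names are made up):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HostGroupBookkeeping {
  public static void main(String[] args) {
    // host group -> components already present in the blueprint
    Map<String, List<String>> hostGroups = new HashMap<>();
    hostGroups.put("host_group_1", new ArrayList<>(Arrays.asList("NAMENODE", "ZOOKEEPER_SERVER")));

    // fqdn -> host group, as resolved by getHostGroupByFqdn in the patch
    Map<String, String> fqdnToGroup = new HashMap<>();
    fqdnToGroup.put("c6401.ambari.apache.org", "host_group_1");

    // A master component chosen in the wizard but not installed yet.
    String hostName = "c6401.ambari.apache.org";
    String component = "DRUID_COORDINATOR";

    String group = fqdnToGroup.get(hostName);
    if (group != null && !hostGroups.get(group).contains(component)) {
      hostGroups.get(group).add(component); // mirrors addComponentToHostGroup
    }
    System.out.println(hostGroups);
  }
}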
 


[08/16] ambari git commit: AMBARI-21569. Users randomly getting "HDFS020 Could not write file" exceptions while running a query from Hive View (Venkata Sairam)

Posted by ao...@apache.org.
AMBARI-21569. Users randomly getting "HDFS020 Could not write file" exceptions while running a query from Hive View (Venkata Sairam)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c57e243d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c57e243d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c57e243d

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: c57e243d2d0c2f480b56693a39e97bb81e258da6
Parents: 099e018
Author: Venkata Sairam <ve...@gmail.com>
Authored: Thu Nov 23 15:52:55 2017 +0530
Committer: Venkata Sairam <ve...@gmail.com>
Committed: Thu Nov 23 15:52:55 2017 +0530

----------------------------------------------------------------------
 .../org/apache/ambari/view/utils/hdfs/HdfsApi.java | 16 +++++++++++++++-
 .../apache/ambari/view/utils/hdfs/HdfsUtil.java    | 17 ++++++++++++-----
 2 files changed, 27 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c57e243d/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApi.java
----------------------------------------------------------------------
diff --git a/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApi.java b/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApi.java
index 3db2081..812cd54 100644
--- a/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApi.java
+++ b/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsApi.java
@@ -485,7 +485,20 @@ public class HdfsApi {
    * @throws IOException
    * @throws InterruptedException
    */
-  public <T> T execute(PrivilegedExceptionAction<T> action)
+  public <T> T execute(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException {
+    return this.execute(action, false);
+  }
+
+
+  /**
+   * Executes action on HDFS using doAs
+   * @param action strategy object
+   * @param <T> result type
+   * @return result of operation
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  public <T> T execute(PrivilegedExceptionAction<T> action, boolean alwaysRetry)
       throws IOException, InterruptedException {
     T result = null;
 
@@ -508,6 +521,7 @@ public class HdfsApi {
         }
         LOG.info("HDFS threw 'IOException: Cannot obtain block length' exception. " +
             "Retrying... Try #" + (tryNumber + 1));
+        LOG.error("Retrying: " + ex.getMessage(),ex);
         Thread.sleep(1000);  //retry after 1 second
       }
     } while (!succeeded);

http://git-wip-us.apache.org/repos/asf/ambari/blob/c57e243d/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsUtil.java
----------------------------------------------------------------------
diff --git a/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsUtil.java b/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsUtil.java
index 0670f1a..810129b 100644
--- a/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsUtil.java
+++ b/contrib/views/utils/src/main/java/org/apache/ambari/view/utils/hdfs/HdfsUtil.java
@@ -27,6 +27,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
 public class HdfsUtil {
@@ -38,13 +39,19 @@ public class HdfsUtil {
    * @param filePath path to file
    * @param content new content of file
    */
-  public static void putStringToFile(HdfsApi hdfs, String filePath, String content) throws HdfsApiException {
-    FSDataOutputStream stream;
+  public static void putStringToFile(final HdfsApi hdfs, final String filePath, final String content) throws HdfsApiException {
+
     try {
       synchronized (hdfs) {
-        stream = hdfs.create(filePath, true);
-        stream.write(content.getBytes());
-        stream.close();
+        hdfs.execute(new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            final FSDataOutputStream stream = hdfs.create(filePath, true);
+            stream.write(content.getBytes());
+            stream.close();
+            return null;
+          }
+        }, true);
       }
     } catch (IOException e) {
       throw new HdfsApiException("HDFS020 Could not write file " + filePath, e);
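
The write now goes through HdfsApi.execute(), which runs the action on HDFS using doAs and retries when an IOException is thrown. A compact standalone sketch of that retry shape using only JDK classes (the real HdfsApi also wraps the call in doAs and decides whether to retry based on the exception and the new alwaysRetry flag; the file name below is made up):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

public class RetryingExecute {
  static <T> T execute(PrivilegedExceptionAction<T> action, int maxTries) throws Exception {
    for (int tryNumber = 1; ; tryNumber++) {
      try {
        // In the view code this call runs under doAs, so the file is written
        // as the requesting user rather than the view's own user.
        return action.run();
      } catch (IOException ex) {
        if (tryNumber >= maxTries) {
          throw ex;
        }
        System.err.println("Try #" + tryNumber + " failed, retrying: " + ex.getMessage());
        Thread.sleep(1000); // back off for a second between attempts
      }
    }
  }

  public static void main(String[] args) throws Exception {
    // Toy action standing in for "create the file and write the query payload".
    String result = execute(() -> "wrote /user/admin/hive/jobs/query.hql", 3);
    System.out.println(result);
  }
}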


[03/16] ambari git commit: AMBARI-22469. Ambari upgrade failed (dlysnichenko)

Posted by ao...@apache.org.
AMBARI-22469. Ambari upgrade failed (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/92e362b7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/92e362b7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/92e362b7

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 92e362b718c0b8511ec95beb7268368f24fe92b0
Parents: f769309
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Wed Nov 22 17:04:42 2017 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Wed Nov 22 17:04:42 2017 +0200

----------------------------------------------------------------------
 .../server/upgrade/UpgradeCatalog260.java       | 28 ++++++++++++++------
 1 file changed, 20 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/92e362b7/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
index 96ce807..25635b6 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog260.java
@@ -191,7 +191,7 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
    */
   @Override
   protected void executeDDLUpdates() throws AmbariException, SQLException {
-    int currentVersionID = getCurrentVersionID();
+    Integer currentVersionID = getCurrentVersionID();
     dropBrokenFK();
     updateServiceComponentDesiredStateTable(currentVersionID);
     updateServiceDesiredStateTable(currentVersionID);
@@ -358,10 +358,13 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
    * Removes {@value #FK_SDS_DESIRED_STACK_ID} foreign key.
    * adds {@value #FK_REPO_VERSION_ID} foreign key.
    *
+   * @param currentRepoID id of current repo_version. Can be null if there are no cluster repo versions
+   *                      (in this case {@value #SERVICE_DESIRED_STATE_TABLE} table must be empty)
+   *
    * @throws java.sql.SQLException
    */
-  private void updateServiceDesiredStateTable(int currentRepoID) throws SQLException {
-
+  private void updateServiceDesiredStateTable(Integer currentRepoID) throws SQLException {
+    // If currentRepoID is null, the {@value #SERVICE_DESIRED_STATE_TABLE} table must be empty, so a null defaultValue is acceptable for the non-nullable column
     dbAccessor.addColumn(SERVICE_DESIRED_STATE_TABLE,
         new DBAccessor.DBColumnInfo(DESIRED_REPO_VERSION_ID_COLUMN, Long.class, null, currentRepoID, false));
     dbAccessor.alterColumn(SERVICE_DESIRED_STATE_TABLE,
@@ -411,9 +414,13 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
    * Removes {@value #FK_SCDS_DESIRED_STACK_ID} foreign key.
    * adds {@value #FK_SCDS_DESIRED_REPO_ID} foreign key.
    *
+   * @param currentRepoID id of current repo_version. Can be null if there are no cluster repo versions
+   *                      (in this case {@value #SERVICE_DESIRED_STATE_TABLE} table must be empty)
+   *
    * @throws java.sql.SQLException
    */
-  private void updateServiceComponentDesiredStateTable(int currentRepoID) throws SQLException {
+  private void updateServiceComponentDesiredStateTable(Integer currentRepoID) throws SQLException {
+    // If currentRepoID is null, the {@value #SERVICE_DESIRED_STATE_TABLE} table must be empty, so a null defaultValue is acceptable for the non-nullable column
     dbAccessor.addColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
         new DBAccessor.DBColumnInfo(DESIRED_REPO_VERSION_ID_COLUMN, Long.class, null, currentRepoID, false));
     dbAccessor.alterColumn(SERVICE_COMPONENT_DESIRED_STATE_TABLE,
@@ -499,15 +506,20 @@ public class UpgradeCatalog260 extends AbstractUpgradeCatalog {
    * where {@value #STATE_COLUMN} = {@value #CURRENT}
    * and validate it
    *
-   * @return current version ID
-   * @throws AmbariException
+   * @return current version ID or null if no cluster versions do exist
+   * @throws AmbariException if cluster versions are present, but current is not selected
    * @throws SQLException
    */
-  public int getCurrentVersionID() throws AmbariException, SQLException {
+  public Integer getCurrentVersionID() throws AmbariException, SQLException {
     List<Integer> currentVersionList = dbAccessor.getIntColumnValues(CLUSTER_VERSION_TABLE, REPO_VERSION_ID_COLUMN,
         new String[]{STATE_COLUMN}, new String[]{CURRENT}, false);
     if (currentVersionList.isEmpty()) {
-      throw new AmbariException("Unable to find any CURRENT repositories.");
+      List<Integer> allVersionList = dbAccessor.getIntColumnValues(CLUSTER_VERSION_TABLE, REPO_VERSION_ID_COLUMN, null, null, false);
+      if (allVersionList.isEmpty()){
+        return null;
+      } else {
+        throw new AmbariException("Unable to find any CURRENT repositories.");
+      }
     } else if (currentVersionList.size() != 1) {
       throw new AmbariException("The following repositories were found to be CURRENT: ".concat(StringUtils.join(currentVersionList, ",")));
     }
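
In short, getCurrentVersionID() now distinguishes an empty cluster_version table (legal; null is returned, and per the comments above the tables being altered must then be empty too) from "versions exist but none is CURRENT", which is still an error. A small standalone illustration of that rule (plain Java lists instead of the DBAccessor query; the exception type is simplified):

import java.util.Collections;
import java.util.List;

public class CurrentVersionRule {
  static Integer currentVersionId(List<Integer> currentRows, List<Integer> allRows) {
    if (currentRows.isEmpty()) {
      if (allRows.isEmpty()) {
        return null; // empty cluster_version table: nothing to use as the column default yet
      }
      throw new IllegalStateException("Unable to find any CURRENT repositories.");
    }
    if (currentRows.size() != 1) {
      throw new IllegalStateException("More than one CURRENT repository: " + currentRows);
    }
    return currentRows.get(0);
  }

  public static void main(String[] args) {
    System.out.println(currentVersionId(Collections.<Integer>emptyList(), Collections.<Integer>emptyList())); // null
    System.out.println(currentVersionId(Collections.singletonList(42), Collections.singletonList(42)));       // 42
  }
}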


[13/16] ambari git commit: AMBARI-22137 - Different stack versions should be able to link to different extension versions

Posted by ao...@apache.org.
AMBARI-22137 - Different stack versions should be able to link to different extension versions


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/d7b25eec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/d7b25eec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/d7b25eec

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: d7b25eec2629822f90cac6de510f806b1004a8f7
Parents: 7c56924
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue Oct 10 13:54:01 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Fri Nov 24 12:13:01 2017 -0800

----------------------------------------------------------------------
 .../controller/AmbariManagementHelper.java      |   2 +-
 .../ambari/server/stack/ExtensionHelper.java    |  57 ++-
 .../apache/ambari/server/stack/StackModule.java |   4 +
 .../server/stack/StackManagerExtensionTest.java |  31 +-
 .../resources/extensions/EXT/0.2/metainfo.xml   |   2 +-
 .../resources/extensions/EXT/0.3/metainfo.xml   |   2 +-
 .../stacks_with_extensions/HDP/0.4/metainfo.xml |  22 ++
 .../HDP/0.4/repos/repoinfo.xml                  |  63 +++
 .../HDP/0.4/services/HBASE/metainfo.xml         |  26 ++
 .../0.4/services/HDFS/configuration/global.xml  | 145 +++++++
 .../services/HDFS/configuration/hadoop-env.xml  | 223 +++++++++++
 .../services/HDFS/configuration/hbase-site.xml  | 137 +++++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 199 ++++++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 396 +++++++++++++++++++
 .../HDP/0.4/services/HDFS/metainfo.xml          |  30 ++
 .../0.4/services/HDFS/package/dummy-script.py   |  20 +
 .../HDP/0.4/services/HIVE/metainfo.xml          |  26 ++
 .../HDP/0.4/services/MAPREDUCE/metainfo.xml     |  23 ++
 .../HDP/0.4/services/ZOOKEEPER/metainfo.xml     |  26 ++
 19 files changed, 1425 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
index 2dd6f12..0c8edfe 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
@@ -70,7 +70,7 @@ public class AmbariManagementHelper {
    */
   public void createExtensionLink(StackManager stackManager, StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
     validateCreateExtensionLinkRequest(stackInfo, extensionInfo);
-    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+    ExtensionHelper.validateCreateLink(stackManager, stackInfo, extensionInfo);
     ExtensionLinkEntity linkEntity = createExtensionLinkEntity(stackInfo, extensionInfo);
     stackManager.linkStackToExtension(stackInfo, extensionInfo);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
index cd4d9f3..8e1d989 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionHelper.java
@@ -27,6 +27,8 @@ import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
 import org.apache.ambari.server.utils.VersionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An extension version is like a stack version but it contains custom services.  Linking an extension
@@ -35,6 +37,8 @@ import org.apache.ambari.server.utils.VersionUtils;
  */
 public class ExtensionHelper {
 
+  private final static Logger LOG = LoggerFactory.getLogger(ExtensionHelper.class);
+
   public static void validateDeleteLink(Clusters clusters, StackInfo stack, ExtensionInfo extension) throws AmbariException {
     validateNotRequiredExtension(stack, extension);
     validateServicesNotInstalled(clusters, stack, extension);
@@ -62,9 +66,9 @@ public class ExtensionHelper {
     }
   }
 
-  public static void validateCreateLink(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  public static void validateCreateLink(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
     validateSupportedStackVersion(stack, extension);
-    validateServiceDuplication(stack, extension);
+    validateServiceDuplication(stackManager, stack, extension);
     validateRequiredExtensions(stack, extension);
   }
 
@@ -88,15 +92,24 @@ public class ExtensionHelper {
     throw new AmbariException(message);
   }
 
-  private static void validateServiceDuplication(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+  private static void validateServiceDuplication(StackManager stackManager, StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    LOG.debug("Looking for duplicate services");
     for (ServiceInfo service : extension.getServices()) {
+      LOG.debug("Looking for duplicate service " + service.getName());
       if (service != null) {
         ServiceInfo stackService = null;
         try {
           stackService = stack.getService(service.getName());
+          if (stackService != null) {
+            LOG.debug("Found service " + service.getName());
+            if (isInheritedExtensionService(stackManager, stack, service.getName(), extension.getName())) {
+              stackService = null;
+            }
+          }
         }
         catch (Exception e) {
           //Eat the exception
+          LOG.error("Error validating service duplication", e);
         }
         if (stackService != null) {
           String message = "Existing service is included in extension"
@@ -112,6 +125,44 @@ public class ExtensionHelper {
     }
   }
 
+  private static boolean isInheritedExtensionService(StackManager stackManager, StackInfo stack, String serviceName, String extensionName) {
+    // Check if service is from an extension at the current stack level, if so then it isn't inherited from its parent stack version
+    if (isExtensionService(stack, serviceName, extensionName)) {
+      LOG.debug("Service is at requested stack/version level " + serviceName);
+      return false;
+    }
+
+    return isExtensionService(stackManager, stack.getName(), stack.getParentStackVersion(), serviceName, extensionName);
+  }
+
+  private static boolean isExtensionService(StackManager stackManager, String stackName, String stackVersion, String serviceName, String extensionName) {
+    LOG.debug("Checking at stack/version " + stackName + "/" + stackVersion);
+    StackInfo stack = stackManager.getStack(stackName, stackVersion);
+
+    if (stack == null) {
+      LOG.warn("Stack/version not found " + stackName + "/" + stackVersion);
+      return false;
+    }
+
+    if (isExtensionService(stack, serviceName, extensionName)) {
+      LOG.debug("Stack/version " + stackName + "/" + stackVersion + " contains service " + serviceName);
+      return true;
+    }
+    else {
+      return isExtensionService(stackManager, stackName, stack.getParentStackVersion(), serviceName, extensionName);
+    }
+  }
+
+  private static boolean isExtensionService(StackInfo stack, String serviceName, String extensionName) {
+    ExtensionInfo extension = stack.getExtension(extensionName);
+    if (extension == null) {
+      LOG.debug("Extension not found " + extensionName);
+      return false;
+    }
+
+    return extension.getService(serviceName) != null;
+  }
+
   private static void validateRequiredExtensions(StackInfo stack, ExtensionInfo extension) throws AmbariException {
     for (ExtensionMetainfoXml.Extension requiredExtension : extension.getExtensions()) {
       if (requiredExtension != null) {
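
Net effect of the ExtensionHelper change: a service that the stack already exposes no longer blocks linking an extension when that service was itself inherited from the same extension linked at an ancestor stack version. The StackManager is threaded in from AmbariManagementHelper above so that isInheritedExtensionService() can check the requested stack/version level first and then walk parent versions via getParentStackVersion() until it either finds the extension providing the service (inherited, so not a duplicate) or runs out of stack versions. A hedged usage sketch of the new entry point; the stack and extension names mirror the test resources added in this commit:

  StackInfo stack = stackManager.getStack("HDP", "0.4");
  ExtensionInfo ext = stack.getExtension("EXT");
  try {
    // new three-argument validation introduced by this commit
    ExtensionHelper.validateCreateLink(stackManager, stack, ext);
  } catch (AmbariException e) {
    // a duplicate-service collision now surfaces here only when the colliding
    // stack service is not inherited through EXT at a parent stack version
  }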

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 34a3047..1859928 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -197,6 +197,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     if (parentVersion != null) {
       mergeStackWithParent(parentVersion, allStacks, commonServices, extensions);
     }
+
     for (ExtensionInfo extension : stackInfo.getExtensions()) {
       String extensionKey = extension.getName() + StackManager.PATH_DELIMITER + extension.getVersion();
       ExtensionModule extensionModule = extensions.get(extensionKey);
@@ -404,6 +405,9 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
 
   private void addExtensionServices() throws AmbariException {
     for (ExtensionModule extension : extensionModules.values()) {
+      for (Map.Entry<String, ServiceModule> entry : extension.getServiceModules().entrySet()) {
+        serviceModules.put(entry.getKey(), entry.getValue());
+      }
       stackInfo.addExtension(extension.getModuleInfo());
     }
   }
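
The addExtensionServices() change registers the service modules contributed by each linked extension in the stack's own service-module map, so stack-module processing treats them like any other stack service (the new HDP 0.4 fixtures below exercise the inheriting stack version). Sketch of the observable effect, mirroring the assertions added to StackManagerExtensionTest:

  // With EXT/0.1 linked to HDP/0.1 (see the linkDao expectations in the test),
  // OOZIE2 resolves as a regular stack service of HDP 0.1:
  StackInfo hdp01 = stackManager.getStack("HDP", "0.1");
  ServiceInfo oozie2 = hdp01.getService("OOZIE2");
  // oozie2 is non-null, reports version 3.2.0, and its package folder points
  // into extensions/EXT/0.1/services/OOZIE2/package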

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 4ae52c0..34522da 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -73,6 +73,9 @@ public class StackManagerExtensionTest  {
     StackEntity stack3 = new StackEntity();
     stack3.setStackName("HDP");
     stack3.setStackVersion("0.3");
+    StackEntity stack4 = new StackEntity();
+    stack4.setStackName("HDP");
+    stack4.setStackVersion("0.4");
     ExtensionEntity extension1 = new ExtensionEntity();
     extension1.setExtensionName("EXT");
     extension1.setExtensionVersion("0.1");
@@ -82,19 +85,28 @@ public class StackManagerExtensionTest  {
     ExtensionEntity extension3 = new ExtensionEntity();
     extension3.setExtensionName("EXT");
     extension3.setExtensionVersion("0.3");
+    ExtensionLinkEntity link1 = new ExtensionLinkEntity();
+    link1.setLinkId(new Long(-1));
+    link1.setStack(stack1);
+    link1.setExtension(extension1);
     List<ExtensionLinkEntity> list = new ArrayList<>();
+    List<ExtensionLinkEntity> linkList = new ArrayList<>();
+    linkList.add(link1);
 
     expect(stackDao.find("HDP", "0.1")).andReturn(stack1).atLeastOnce();
     expect(stackDao.find("HDP", "0.2")).andReturn(stack2).atLeastOnce();
     expect(stackDao.find("HDP", "0.3")).andReturn(stack3).atLeastOnce();
+    expect(stackDao.find("HDP", "0.4")).andReturn(stack3).atLeastOnce();
     expect(extensionDao.find("EXT", "0.1")).andReturn(extension1).atLeastOnce();
     expect(extensionDao.find("EXT", "0.2")).andReturn(extension2).atLeastOnce();
     expect(extensionDao.find("EXT", "0.3")).andReturn(extension3).atLeastOnce();
 
+    expect(linkDao.findByStack("HDP", "0.1")).andReturn(linkList).atLeastOnce();
     expect(linkDao.findByStack(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
 
     expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", "0.2")).andReturn(null).atLeastOnce();
+    expect(linkDao.findByStackAndExtension("HDP", "0.1", "EXT", "0.1")).andReturn(link1).atLeastOnce();
 
     replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao); //linkEntity
 
@@ -154,21 +166,34 @@ public class StackManagerExtensionTest  {
     assertNotNull(themes);
     assertTrue("Number of themes is " + themes.size(), themes.size() == 0);
 
-    StackInfo stack = stackManager.getStack("HDP", "0.2");
+    StackInfo stack = stackManager.getStack("HDP", "0.1");
     assertNotNull(stack.getService("OOZIE2"));
     oozie = stack.getService("OOZIE2");
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
-    assertEquals(oozie.getVersion(), "4.0.0");
+    assertEquals(oozie.getVersion(), "3.2.0");
 
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
-    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.1");
+
+    stack = stackManager.getStack("HDP", "0.2");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 0);
 
     stack = stackManager.getStack("HDP", "0.3");
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
+    assertNotNull(extension.getService("OOZIE2"));
+    oozie = extension.getService("OOZIE2");
+    assertEquals(oozie.getVersion(), "4.0.0");
+
+    assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+
+    stack = stackManager.getStack("HDP", "0.4");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+    extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
     assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
   }
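
The reworked assertions pin down which stack versions see the extension and its OOZIE2 service after loading. A compact summary of what the test now expects:

  // HDP 0.1 -> one extension, EXT 0.1; OOZIE2 3.2.0 served through the stack itself
  // HDP 0.2 -> no extensions
  // HDP 0.3 -> one extension, EXT 0.3; OOZIE2 4.0.0 available from the extension
  // HDP 0.4 -> one extension, EXT 0.3 (the stack version added by this commit,
  //            which declares 0.3 as its parent)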

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
index 0d37b3e..c95a20f 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
@@ -25,7 +25,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2</version>
+        <version>0.3</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
index d827314..1b6ce73 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
@@ -25,7 +25,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2</version>
+        <version>0.3</version>
       </stack>
     </min-stack-versions>
   </prerequisites>
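
Both the EXT/0.2 and EXT/0.3 test metainfo files now declare HDP 0.3 as the minimum supported stack version, which feeds the validateSupportedStackVersion() step that runs before the duplicate-service check in validateCreateLink(). An illustrative check only, not the actual helper body; the VersionUtils.compareVersions call shape is an assumption rather than something shown in this diff:

  String minStackVersion = "0.3";   // the <version> value declared above
  boolean supported = "HDP".equals(stack.getName())
      && VersionUtils.compareVersions(stack.getVersion(), minStackVersion) >= 0;
  // e.g. HDP 0.2 -> supported == false, so the extension cannot be linked to it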

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
new file mode 100644
index 0000000..3b4897f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <upgrade>0.3</upgrade>
+    </versions>
+</metainfo>
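
The new HDP 0.4 test stack declares 0.3 as its parent via the <upgrade> element, which is the value StackInfo.getParentStackVersion() exposes and the value the inherited-extension walk in ExtensionHelper follows. A hedged illustration against these fixtures:

  StackInfo hdp04 = stackManager.getStack("HDP", "0.4");
  String parentVersion = hdp04.getParentStackVersion();   // expected to be "0.3"
  StackInfo hdp03 = stackManager.getStack("HDP", parentVersion);
  // hdp03 is the level at which the EXT extension is visible in the tests above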

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
new file mode 100644
index 0000000..9b3b1c7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/repos/repoinfo.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..48123f0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..bcab577
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/global.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>KeyTab Directory.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8fb8c7f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignores failures on users and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
new file mode 100644
index 0000000..5024e85
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hbase-site.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.regionserver.msginterval</name>
+    <value>1000</value>
+    <description>Interval between messages from the RegionServer to HMaster
+    in milliseconds.  Default is 15. Set this value low if you want unit
+    tests to be responsive.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.pause</name>
+    <value>5000</value>
+    <description>General client pause value.  Used mostly as value to wait
+    before running a retry of a failed get, region lookup, etc.</description>
+  </property>
+  <property>
+    <name>hbase.master.meta.thread.rescanfrequency</name>
+    <value>10000</value>
+    <description>How long the HMaster sleeps (in milliseconds) between scans of
+    the root and meta tables.
+    </description>
+  </property>
+  <property>
+    <name>hbase.server.thread.wakefrequency</name>
+    <value>1000</value>
+    <description>Time to sleep in between searches for work (in milliseconds).
+    Used as sleep interval by service threads such as META scanner and log roller.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>5</value>
+    <description>Count of RPC Server instances spun up on RegionServers
+    Same property is used by the HMaster for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.period</name>
+    <value>6000</value>
+    <description>Length of time the master will wait before timing out a region
+    server lease. Since region servers report in every second (see above), this
+    value has been reduced so that the master will notice a dead region server
+    sooner. The default is 30 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase master web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value>-1</value>
+    <description>The port for the hbase regionserver web UI
+    Set to -1 if you do not want the info server to run.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port.auto</name>
+    <value>true</value>
+    <description>Info server auto port bind. Enables automatic port
+    search if hbase.regionserver.info.port is already in use.
+    Enabled for testing to run multiple tests on one machine.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.lease.thread.wakefrequency</name>
+    <value>3000</value>
+    <description>The interval between checks for expired region server leases.
+    This value has been reduced due to the other reduced values above so that
+    the master will notice a dead region server sooner. The default is 15 seconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.optionalcacheflushinterval</name>
+    <value>10000</value>
+    <description>
+    Amount of time to wait since the last time a region was flushed before
+    invoking an optional cache flush. Default 60,000.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.safemode</name>
+    <value>false</value>
+    <description>
+    Turn on/off safe mode in region server. Always on for production, always off
+    for tests.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>67108864</value>
+    <description>
+    Maximum desired file size for an HRegion.  If filesize exceeds
+    value + (value / 2), the HRegion is split in two.  Default: 256M.
+
+    Keep the maximum filesize small so we split more often in tests.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.log.dir</name>
+    <value>${user.dir}/../logs</value>
+  </property>
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>21818</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
new file mode 100644
index 0000000..649472d
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-log4j.xml
@@ -0,0 +1,199 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <value>
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollver at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and and 30-day backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+# Removes "deprecated" messages
+log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN
+    </value>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..2b979d7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,396 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<!-- file system properties -->
+
+  <property>
+    <name>dfs.name.dir</name>
+    <!-- cluster variant -->
+    <value>/mnt/hmc/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <description>to enable webhdfs</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>#of failed disks dn would tolerate</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.block.local-path-access.user</name>
+    <value>hbase</value>
+    <description>the user who is allowed to perform short
+    circuit reads.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>/mnt/hmc/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem an DFS data node
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
+  </property>
+
+  <property>
+    <name>dfs.hosts</name>
+    <value>/etc/hadoop/conf/dfs.include</value>
+    <description>Names a file that contains a list of hosts that are
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.safemode.threshold.pct</name>
+    <value>1.0f</value>
+    <description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
+  </property>
+
+  <property>
+    <name>dfs.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.block.size</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50070</value>
+<description>The name of the default file system.  Either the
+literal string "local" or a host:port for HDFS.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>1024</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value>nn/_HOST@</value>
+    <description>
+        Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value>host/_HOST@</value>
+    <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.secondary.http.address</name>
+    <value>hdp2.cybervisiontech.com.ua:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.https.port</name>
+    <value>50490</value>
+    <description>The https port where secondary-namenode binds</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>dn/_HOST@</value>
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+ <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value>/nn.service.keytab</value>
+  <description>
+        Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/dn.service.keytab</value>
+ <description>
+        The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+ <description>The https port where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.https.address</name>
+    <value>hdp1.cybervisiontech.com.ua:50470</value>
+  <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for an HDFS file is precise up to this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description></description>
+</property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..da61660
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/metainfo.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <extends>common-services/HDFS/1.0</extends>
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+        <config-type>hdfs-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HDFS/package/dummy-script.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..9c122b2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/HIVE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <extends>common-services/HIVE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
new file mode 100644
index 0000000..3b0b3d9
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/MAPREDUCE/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <extends>common-services/MAPREDUCE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/d7b25eec/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..9c8a299
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.4/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <extends>common-services/ZOOKEEPER/1.0</extends>
+    </service>
+  </services>
+</metainfo>


[09/16] ambari git commit: AMBARI-22513 Make yumrpm.py functions to use global defined commands (dgrinenko)

Posted by ao...@apache.org.
AMBARI-22513 Make yumrpm.py functions to use global defined commands (dgrinenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/54bc2a2e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/54bc2a2e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/54bc2a2e

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 54bc2a2e38a022356f1c405f755ac1b000450b3a
Parents: c57e243
Author: Dmytro Grinenko <ha...@apache.org>
Authored: Fri Nov 24 15:55:20 2017 +0200
Committer: Dmytro Grinenko <ha...@apache.org>
Committed: Fri Nov 24 15:55:20 2017 +0200

----------------------------------------------------------------------
 .../core/providers/package/yumrpm.py                    |  4 ++--
 .../HIVE/0.12.0.2.0/package/scripts/hive.py             | 12 ++++++------
 .../test/python/stacks/2.0.6/HIVE/test_hive_server.py   |  2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/54bc2a2e/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
index 24f03f7..fdf1743 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
@@ -138,7 +138,7 @@ class YumProvider(RPMBasedPackageProvider):
     :rtype list[list,]
     """
 
-    cmd = [AMBARI_SUDO_BINARY, "yum", "list", "available"]
+    cmd = list(ALL_AVAILABLE_PACKAGES_CMD)
 
     if repo_filter:
       cmd.extend(["--disablerepo=*", "--enablerepo=" + repo_filter])
@@ -154,7 +154,7 @@ class YumProvider(RPMBasedPackageProvider):
     :rtype list[list,]
     """
 
-    packages = self._lookup_packages([AMBARI_SUDO_BINARY, "yum", "list", "installed"], "Installed Packages")
+    packages = self._lookup_packages(list(ALL_INSTALLED_PACKAGES_CMD), "Installed Packages")
     if repo_filter:
       packages = [item for item in packages if item[2].lower() == repo_filter.lower()]
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/54bc2a2e/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index abbe59e..a02f951 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -24,7 +24,7 @@ from urlparse import urlparse
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.functions import copy_tarball
 from resource_management.libraries.functions import StackFeature
 from resource_management.libraries.functions.stack_features import check_stack_feature
 from resource_management.core.resources.service import ServiceConfig
@@ -138,19 +138,19 @@ def hive(name=None):
     # *********************************
     #  if copy tarball to HDFS feature  supported copy mapreduce.tar.gz and tez.tar.gz to HDFS
     if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
-      copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
-      copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+      copy_tarball.copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
+      copy_tarball.copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
 
     # Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
     # This can use a different source and dest location to account
-    copy_to_hdfs("pig",
+    copy_tarball.copy_to_hdfs("pig",
                  params.user_group,
                  params.hdfs_user,
                  file_mode=params.tarballs_mode,
                  custom_source_file=params.pig_tar_source,
                  custom_dest_file=params.pig_tar_dest_file,
                  skip=params.sysprep_skip_copy_tarballs_hdfs)
-    copy_to_hdfs("hive",
+    copy_tarball.copy_to_hdfs("hive",
                  params.user_group,
                  params.hdfs_user,
                  file_mode=params.tarballs_mode,
@@ -171,7 +171,7 @@ def hive(name=None):
         src_filename = os.path.basename(source_file)
         dest_file = os.path.join(dest_dir, src_filename)
 
-        copy_to_hdfs(tarball_name,
+        copy_tarball.copy_to_hdfs(tarball_name,
                      params.user_group,
                      params.hdfs_user,
                      file_mode=params.tarballs_mode,

http://git-wip-us.apache.org/repos/asf/ambari/blob/54bc2a2e/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index fa230cb..dca7c18 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -781,12 +781,12 @@ class TestHiveServer(RMFTestCase):
   @patch("os.path.exists", new = MagicMock(return_value=True))
   @patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
   def test_stop_during_upgrade(self, copy_to_hdfs_mock):
-
     hiveServerVersionOutput = """WARNING: Use "yarn jar" to launch YARN applications.
 Hive 1.2.1.2.3.0.0-2434
 Subversion git://ip-10-0-0-90.ec2.internal/grid/0/jenkins/workspace/HDP-dal-centos6/bigtop/build/hive/rpm/BUILD/hive-1.2.1.2.3.0.0 -r a77a00ae765a73b2957337e96ed5a0dbb2e60dfb
 Compiled by jenkins on Sat Jun 20 11:50:41 EDT 2015
 From source with checksum 150f554beae04f76f814f59549dead8b"""
+
     call_side_effects = [(0, hiveServerVersionOutput), (0, hiveServerVersionOutput)] * 4
     copy_to_hdfs_mock.return_value = True
 


[12/16] ambari git commit: AMBARI-20891 - Allow extensions to auto-link with supported stack versions

Posted by ao...@apache.org.
AMBARI-20891 - Allow extensions to auto-link with supported stack versions

Conflicts:
	ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
	ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
	ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
	ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7c56924a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7c56924a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7c56924a

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 7c56924a952358127dd80eef7e8c9dfeac0aa8b0
Parents: 0e2e711
Author: Tim Thorpe <tt...@apache.org>
Authored: Tue May 9 07:53:39 2017 -0700
Committer: Tim Thorpe <tt...@apache.org>
Committed: Fri Nov 24 12:00:57 2017 -0800

----------------------------------------------------------------------
 .../AmbariManagementControllerImpl.java         |  68 +---
 .../controller/AmbariManagementHelper.java      | 175 ++++++++
 .../ambari/server/orm/dao/ExtensionLinkDAO.java |  36 +-
 .../orm/entities/ExtensionLinkEntity.java       |   1 +
 .../ambari/server/stack/ExtensionModule.java    |   2 +
 .../ambari/server/stack/StackManager.java       |  81 +++-
 .../apache/ambari/server/stack/StackModule.java |   4 +-
 .../ambari/server/state/ExtensionInfo.java      |  26 +-
 .../apache/ambari/server/state/StackInfo.java   |  27 +-
 .../state/stack/ExtensionMetainfoXml.java       |  11 +
 .../stack/StackManagerCommonServicesTest.java   |   4 +-
 .../server/stack/StackManagerExtensionTest.java |  79 ++--
 .../server/stack/StackManagerMiscTest.java      |  13 +-
 .../ambari/server/stack/StackManagerMock.java   |   5 +-
 .../ambari/server/stack/StackManagerTest.java   |  13 +-
 .../resources/extensions/EXT/0.1/metainfo.xml   |   2 +-
 .../resources/extensions/EXT/0.2/metainfo.xml   |   3 +-
 .../resources/extensions/EXT/0.3/metainfo.xml   |  32 ++
 .../EXT/0.3/services/OOZIE2/metainfo.xml        | 118 ++++++
 .../services/OOZIE2/themes/broken_theme.json    |   3 +
 .../stacks_with_extensions/HDP/0.3/metainfo.xml |  22 ++
 .../HDP/0.3/repos/repoinfo.xml                  |  63 +++
 .../HDP/0.3/services/HBASE/metainfo.xml         |  26 ++
 .../0.3/services/HDFS/configuration/global.xml  | 145 +++++++
 .../services/HDFS/configuration/hadoop-env.xml  | 223 +++++++++++
 .../services/HDFS/configuration/hbase-site.xml  | 137 +++++++
 .../services/HDFS/configuration/hdfs-log4j.xml  | 199 ++++++++++
 .../services/HDFS/configuration/hdfs-site.xml   | 396 +++++++++++++++++++
 .../HDP/0.3/services/HDFS/metainfo.xml          |  30 ++
 .../0.3/services/HDFS/package/dummy-script.py   |  20 +
 .../HDP/0.3/services/HIVE/metainfo.xml          |  26 ++
 .../HDP/0.3/services/MAPREDUCE/metainfo.xml     |  23 ++
 .../HDP/0.3/services/ZOOKEEPER/metainfo.xml     |  26 ++
 33 files changed, 1921 insertions(+), 118 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index a6315f8..585ee46 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -122,14 +122,12 @@ import org.apache.ambari.server.orm.dao.StackDAO;
 import org.apache.ambari.server.orm.dao.WidgetDAO;
 import org.apache.ambari.server.orm.dao.WidgetLayoutDAO;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.ExtensionEntity;
 import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
 import org.apache.ambari.server.orm.entities.HostEntity;
 import org.apache.ambari.server.orm.entities.OperatingSystemEntity;
 import org.apache.ambari.server.orm.entities.RepositoryEntity;
 import org.apache.ambari.server.orm.entities.RepositoryVersionEntity;
 import org.apache.ambari.server.orm.entities.SettingEntity;
-import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.orm.entities.WidgetEntity;
 import org.apache.ambari.server.orm.entities.WidgetLayoutEntity;
 import org.apache.ambari.server.orm.entities.WidgetLayoutUserWidgetEntity;
@@ -313,11 +311,13 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
 
   private MaintenanceStateHelper maintenanceStateHelper;
 
-  @Inject
-  private ExtensionLinkDAO linkDAO;
+  private AmbariManagementHelper helper;
+
   @Inject
   private ExtensionDAO extensionDAO;
   @Inject
+  private ExtensionLinkDAO linkDAO;
+  @Inject
   private StackDAO stackDAO;
   @Inject
   protected OsFamily osFamily;
@@ -401,6 +401,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       mysqljdbcUrl = null;
       serverDB = null;
     }
+    helper = new AmbariManagementHelper(stackDAO, extensionDAO, linkDAO);
   }
 
   @Override
@@ -1825,7 +1826,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       cluster.setCurrentStackVersion(desiredVersion);
     }
     // Stack Upgrade: unlike the workflow for creating a cluster, updating a cluster via the API will not
-    // create any ClusterVersionEntity changes because those have to go through the Stack Upgrade process.
 
     boolean requiresHostListUpdate =
         request.getHostNames() != null && !request.getHostNames().isEmpty();
@@ -5632,7 +5632,13 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
    */
   @Override
   public void createExtensionLink(ExtensionLinkRequest request) throws AmbariException {
-    validateCreateExtensionLinkRequest(request);
+    if (StringUtils.isBlank(request.getStackName())
+            || StringUtils.isBlank(request.getStackVersion())
+            || StringUtils.isBlank(request.getExtensionName())
+            || StringUtils.isBlank(request.getExtensionVersion())) {
+
+      throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
+    }
 
     StackInfo stackInfo = ambariMetaInfo.getStack(request.getStackName(), request.getStackVersion());
 
@@ -5646,24 +5652,7 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
       throw new StackAccessException("extensionName=" + request.getExtensionName() + ", extensionVersion=" + request.getExtensionVersion());
     }
 
-    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
-    ExtensionLinkEntity linkEntity = createExtensionLinkEntity(request);
-    ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
-
-    try {
-      linkDAO.create(linkEntity);
-      linkEntity = linkDAO.merge(linkEntity);
-    } catch (RollbackException e) {
-      String message = "Unable to create extension link";
-      LOG.debug(message, e);
-      String errorMessage = message
-              + ", stackName=" + request.getStackName()
-              + ", stackVersion=" + request.getStackVersion()
-              + ", extensionName=" + request.getExtensionName()
-              + ", extensionVersion=" + request.getExtensionVersion();
-      LOG.warn(errorMessage);
-      throw new AmbariException(errorMessage, e);
-    }
+    helper.createExtensionLink(ambariMetaInfo.getStackManager(), stackInfo, extensionInfo);
   }
 
   /**
@@ -5714,37 +5703,6 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     ambariMetaInfo.getStackManager().linkStackToExtension(stackInfo, extensionInfo);
   }
 
-  private void validateCreateExtensionLinkRequest(ExtensionLinkRequest request) throws AmbariException {
-    if (request.getStackName() == null
-            || request.getStackVersion() == null
-            || request.getExtensionName() == null
-            || request.getExtensionVersion() == null) {
-
-      throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
-    }
-
-    ExtensionLinkEntity entity = linkDAO.findByStackAndExtension(request.getStackName(), request.getStackVersion(),
-            request.getExtensionName(), request.getExtensionVersion());
-
-    if (entity != null) {
-      throw new AmbariException("The stack and extension are already linked"
-                + ", stackName=" + request.getStackName()
-                + ", stackVersion=" + request.getStackVersion()
-                + ", extensionName=" + request.getExtensionName()
-                + ", extensionVersion=" + request.getExtensionVersion());
-    }
-  }
-
-  private ExtensionLinkEntity createExtensionLinkEntity(ExtensionLinkRequest request) throws AmbariException {
-    StackEntity stack = stackDAO.find(request.getStackName(), request.getStackVersion());
-    ExtensionEntity extension = extensionDAO.find(request.getExtensionName(), request.getExtensionVersion());
-
-    ExtensionLinkEntity linkEntity = new ExtensionLinkEntity();
-    linkEntity.setStack(stack);
-    linkEntity.setExtension(extension);
-    return linkEntity;
-  }
-
   @Override
   public QuickLinkVisibilityController getQuicklinkVisibilityController() {
     SettingEntity entity = settingDAO.findByName(QuickLinksProfile.SETTING_NAME_QUICKLINKS_PROFILE);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
new file mode 100644
index 0000000..2dd6f12
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementHelper.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import javax.persistence.RollbackException;
+
+import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.orm.dao.ExtensionDAO;
+import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
+import org.apache.ambari.server.orm.dao.StackDAO;
+import org.apache.ambari.server.orm.entities.ExtensionEntity;
+import org.apache.ambari.server.orm.entities.ExtensionLinkEntity;
+import org.apache.ambari.server.orm.entities.StackEntity;
+import org.apache.ambari.server.stack.ExtensionHelper;
+import org.apache.ambari.server.stack.StackManager;
+import org.apache.ambari.server.state.ExtensionInfo;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.utils.VersionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+@Singleton
+public class AmbariManagementHelper {
+
+  private final static Logger LOG =
+      LoggerFactory.getLogger(AmbariManagementHelper.class);
+
+  private ExtensionLinkDAO linkDAO;
+  private ExtensionDAO extensionDAO;
+  private StackDAO stackDAO;
+
+  @Inject
+  public AmbariManagementHelper(StackDAO stackDAO, ExtensionDAO extensionDAO, ExtensionLinkDAO linkDAO) {
+    this.stackDAO = stackDAO;
+    this.extensionDAO = extensionDAO;
+    this.linkDAO = linkDAO;
+  }
+
+  /**
+   * This method will create a link between an extension version and a stack version (Extension Link).
+   *
+   * An extension version is like a stack version but it contains custom services.  Linking an extension
+   * version to the current stack version allows the cluster to install the custom services contained in
+   * the extension version.
+   */
+  public void createExtensionLink(StackManager stackManager, StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+    validateCreateExtensionLinkRequest(stackInfo, extensionInfo);
+    ExtensionHelper.validateCreateLink(stackInfo, extensionInfo);
+    ExtensionLinkEntity linkEntity = createExtensionLinkEntity(stackInfo, extensionInfo);
+    stackManager.linkStackToExtension(stackInfo, extensionInfo);
+
+    try {
+      linkDAO.create(linkEntity);
+      linkEntity = linkDAO.merge(linkEntity);
+    } catch (RollbackException e) {
+      String message = "Unable to create extension link";
+      LOG.debug(message, e);
+      String errorMessage = message
+              + ", stackName=" + stackInfo.getName()
+              + ", stackVersion=" + stackInfo.getVersion()
+              + ", extensionName=" + extensionInfo.getName()
+              + ", extensionVersion=" + extensionInfo.getVersion();
+      LOG.warn(errorMessage);
+      throw new AmbariException(errorMessage, e);
+    }
+  }
+
+  /**
+   * This method will create a link between an extension version and a stack version (Extension Link).
+   *
+   * An extension version is like a stack version but it contains custom services.  Linking an extension
+   * version to the current stack version allows the cluster to install the custom services contained in
+   * the extension version.
+   */
+  public void createExtensionLinks(StackManager stackManager, List<ExtensionInfo> extensions) throws AmbariException {
+    Map<String, List<StackInfo>> stackMap = stackManager.getStacksByName();
+    for (List<StackInfo> stacks : stackMap.values()) {
+      Collections.sort(stacks);
+      Collections.reverse(stacks);
+    }
+
+    Collections.sort(extensions);
+    Collections.reverse(extensions);
+    for (ExtensionInfo extension : extensions) {
+      if (extension.isActive() && extension.isAutoLink()) {
+        LOG.debug("Autolink - looking for matching stack versions for extension:{}/{} ", extension.getName(), extension.getVersion());
+        for (ExtensionMetainfoXml.Stack supportedStack : extension.getStacks()) {
+          List<StackInfo> stacks = stackMap.get(supportedStack.getName());
+          for (StackInfo stack : stacks) {
+            // If the stack version is not currently linked to a version of the extension and it meets the minimum stack version then link them
+            if (stack.getExtension(extension.getName()) == null && VersionUtils.compareVersions(stack.getVersion(), supportedStack.getVersion()) > -1) {
+              LOG.debug("Autolink - extension: {}/{} stack: {}/{}", extension.getName(), extension.getVersion(),
+                       stack.getName(), stack.getVersion());
+              createExtensionLink(stackManager, stack, extension);
+            }
+            else {
+              LOG.debug("Autolink - not a match extension: {}/{} stack: {}/{}", extension.getName(), extension.getVersion(),
+                       stack.getName(), stack.getVersion());
+            }
+          }
+        }
+      }
+      else {
+        LOG.debug("Autolink - skipping extension: {}/{}.  It is either not active or set to autolink.", extension.getName(), extension.getVersion());
+      }
+    }
+  }
+
+  /**
+   * Validates the stackInfo and extensionInfo parameters are valid.
+   * If they are then it confirms that the stack and extension are not already linked.
+   */
+  private void validateCreateExtensionLinkRequest(StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+    if (stackInfo == null) {
+      throw new IllegalArgumentException("Stack should be provided");
+    }
+    if (extensionInfo == null) {
+      throw new IllegalArgumentException("Extension should be provided");
+    }
+    if (StringUtils.isBlank(stackInfo.getName())
+            || StringUtils.isBlank(stackInfo.getVersion())
+            || StringUtils.isBlank(extensionInfo.getName())
+            || StringUtils.isBlank(extensionInfo.getVersion())) {
+
+      throw new IllegalArgumentException("Stack name, stack version, extension name and extension version should be provided");
+    }
+
+    ExtensionLinkEntity entity = linkDAO.findByStackAndExtension(stackInfo.getName(), stackInfo.getVersion(),
+		extensionInfo.getName(), extensionInfo.getVersion());
+
+    if (entity != null) {
+      throw new AmbariException("The stack and extension are already linked"
+                + ", stackName=" + stackInfo.getName()
+                + ", stackVersion=" + stackInfo.getVersion()
+                + ", extensionName=" + extensionInfo.getName()
+                + ", extensionVersion=" + extensionInfo.getVersion());
+    }
+  }
+
+  private ExtensionLinkEntity createExtensionLinkEntity(StackInfo stackInfo, ExtensionInfo extensionInfo) throws AmbariException {
+    StackEntity stack = stackDAO.find(stackInfo.getName(), stackInfo.getVersion());
+    ExtensionEntity extension = extensionDAO.find(extensionInfo.getName(), extensionInfo.getVersion());
+
+    ExtensionLinkEntity linkEntity = new ExtensionLinkEntity();
+    linkEntity.setStack(stack);
+    linkEntity.setExtension(extension);
+    return linkEntity;
+  }
+
+}
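
The Javadoc above spells out the autolink rule: extension and stack versions are walked newest first, a stack version qualifies only if it meets the minimum stack version the extension declares, and a stack version already linked to some version of that extension is skipped. The following standalone sketch illustrates just that selection rule; the class name, the inlined version comparator and the sample data are hypothetical and not part of the Ambari code base, only the ordering and the skip/minimum checks mirror createExtensionLinks above.

import java.util.*;

public class AutolinkSketch {

  // Compare dotted versions segment by segment, numerically ("0.10" sorts after "0.9").
  static int compareVersions(String a, String b) {
    String[] x = a.split("\\."), y = b.split("\\.");
    for (int i = 0; i < Math.max(x.length, y.length); i++) {
      int xi = i < x.length ? Integer.parseInt(x[i]) : 0;
      int yi = i < y.length ? Integer.parseInt(y[i]) : 0;
      if (xi != yi) return Integer.compare(xi, yi);
    }
    return 0;
  }

  public static void main(String[] args) {
    // Hypothetical stack versions of one stack name, plus the version already linked to the extension.
    List<String> stackVersions = new ArrayList<>(Arrays.asList("0.3", "0.4", "0.2"));
    Set<String> alreadyLinked = new HashSet<>(Collections.singleton("0.2"));
    String minSupported = "0.3"; // the minimum <version> the extension's metainfo declares for this stack

    // Newest stack versions first, mirroring the sort + reverse in createExtensionLinks.
    stackVersions.sort((a, b) -> compareVersions(b, a));

    for (String v : stackVersions) {
      boolean notLinked = !alreadyLinked.contains(v);
      boolean meetsMinimum = compareVersions(v, minSupported) > -1;
      if (notLinked && meetsMinimum) {
        System.out.println("autolink to stack version " + v);   // 0.4 and 0.3
      } else {
        System.out.println("skip stack version " + v);          // 0.2 (already linked)
      }
    }
  }
}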

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
index d90480b..85c5722 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ExtensionLinkDAO.java
@@ -74,16 +74,19 @@ public class ExtensionLinkDAO {
     }
 
     String stackName = request.getStackName();
-    String stackVersion = request.getStackName();
-    String extensionName = request.getStackName();
-    String extensionVersion = request.getStackName();
+    String stackVersion = request.getStackVersion();
+    String extensionName = request.getExtensionName();
+    String extensionVersion = request.getExtensionVersion();
 
     if (stackName != null && stackVersion != null) {
-      if (extensionName != null && extensionVersion != null) {
-        ExtensionLinkEntity entity = findByStackAndExtension(stackName, stackVersion, extensionName, extensionVersion);
-        List<ExtensionLinkEntity> list = new ArrayList<ExtensionLinkEntity>();
-        list.add(entity);
-        return list;
+      if (extensionName != null) {
+        if (extensionVersion != null) {
+          ExtensionLinkEntity entity = findByStackAndExtension(stackName, stackVersion, extensionName, extensionVersion);
+          List<ExtensionLinkEntity> list = new ArrayList<>();
+          list.add(entity);
+          return list;
+        }
+        return findByStackAndExtensionName(stackName, stackVersion, extensionName);
       }
       return findByStack(stackName, stackVersion);
     }
@@ -153,6 +156,23 @@ public class ExtensionLinkDAO {
   }
 
   /**
+   * Gets the extension links that match the specified stack name, stack version and extension name.
+   *
+   * @return the extension links matching the specified stack name, stack version and extension name, if any.
+   */
+  @RequiresSession
+  public List<ExtensionLinkEntity> findByStackAndExtensionName(String stackName, String stackVersion, String extensionName) {
+    TypedQuery<ExtensionLinkEntity> query = entityManagerProvider.get().createNamedQuery(
+        "ExtensionLinkEntity.findByStackAndExtensionName", ExtensionLinkEntity.class);
+
+    query.setParameter("stackName", stackName);
+    query.setParameter("stackVersion", stackVersion);
+    query.setParameter("extensionName", extensionName);
+
+    return daoUtils.selectList(query);
+  }
+
+  /**
    * Gets the extension link that match the specified stack name, stack version, extension name and extension version.
    *
    * @return the extension link matching the specified stack name, stack version, extension name and extension version if any.

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
index 12b3ce0..e2b48bf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ExtensionLinkEntity.java
@@ -42,6 +42,7 @@ import javax.persistence.UniqueConstraint;
 @TableGenerator(name = "link_id_generator", table = "ambari_sequences", pkColumnName = "sequence_name", valueColumnName = "sequence_value", pkColumnValue = "link_id_seq", initialValue = 0)
 @NamedQueries({
     @NamedQuery(name = "ExtensionLinkEntity.findAll", query = "SELECT link FROM ExtensionLinkEntity link"),
+    @NamedQuery(name = "ExtensionLinkEntity.findByStackAndExtensionName", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion AND link.extension.extensionName = :extensionName"),
     @NamedQuery(name = "ExtensionLinkEntity.findByStackAndExtension", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion AND link.extension.extensionName = :extensionName AND link.extension.extensionVersion = :extensionVersion"),
     @NamedQuery(name = "ExtensionLinkEntity.findByStack", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.stack.stackName = :stackName AND link.stack.stackVersion = :stackVersion"),
     @NamedQuery(name = "ExtensionLinkEntity.findByExtension", query = "SELECT link FROM ExtensionLinkEntity link WHERE link.extension.extensionName = :extensionName AND link.extension.extensionVersion = :extensionVersion") })

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
index d425f9a..5c3c60e 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/ExtensionModule.java
@@ -399,6 +399,8 @@ public class ExtensionModule extends BaseModule<ExtensionModule, ExtensionInfo>
       extensionInfo.setParentExtensionVersion(emx.getExtends());
       extensionInfo.setStacks(emx.getStacks());
       extensionInfo.setExtensions(emx.getExtensions());
+      extensionInfo.setActive(emx.getVersion().isActive());
+      extensionInfo.setAutoLink(emx.isAutoLink());
     }
 
     try {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
index 749a95e..ff0a016 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
@@ -20,11 +20,13 @@ package org.apache.ambari.server.stack;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import javax.annotation.Nullable;
 import javax.xml.XMLConstants;
@@ -35,6 +37,7 @@ import javax.xml.validation.Validator;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -57,7 +60,6 @@ import org.xml.sax.SAXParseException;
 import com.google.inject.assistedinject.Assisted;
 import com.google.inject.assistedinject.AssistedInject;
 
-
 /**
  * Manages all stack related behavior including parsing of stacks and providing access to
  * stack information.
@@ -110,6 +112,8 @@ public class StackManager {
    */
   private Map<String, ExtensionInfo> extensionMap = new HashMap<String, ExtensionInfo>();
 
+  private AmbariManagementHelper helper;
+
   /**
    * Constructor. Initialize stack manager.
    *
@@ -131,6 +135,8 @@ public class StackManager {
    *          extension DAO automatically injected
    * @param linkDao
    *          extension link DAO automatically injected
+   * @param helper
+   *          Ambari management helper automatically injected
    *
    * @throws AmbariException
    *           if an exception occurs while processing the stacks
@@ -141,7 +147,7 @@ public class StackManager {
       @Assisted("extensionRoot") @Nullable File extensionRoot,
       @Assisted OsFamily osFamily, @Assisted boolean validate,
       MetainfoDAO metaInfoDAO, ActionMetadata actionMetadata, StackDAO stackDao,
-      ExtensionDAO extensionDao, ExtensionLinkDAO linkDao)
+      ExtensionDAO extensionDao, ExtensionLinkDAO linkDao, AmbariManagementHelper helper)
       throws AmbariException {
 
     LOG.info("Initializing the stack manager...");
@@ -154,7 +160,8 @@ public class StackManager {
 
     stackMap = new HashMap<String, StackInfo>();
     stackContext = new StackContext(metaInfoDAO, actionMetadata, osFamily);
-    extensionMap = new HashMap<String, ExtensionInfo>();
+    extensionMap = new HashMap<>();
+    this.helper = helper;
 
     parseDirectories(stackRoot, commonServicesRoot, extensionRoot);
 
@@ -189,6 +196,7 @@ public class StackManager {
     LOG.info("About to parse extension directories");
     extensionModules = parseExtensionDirectory(extensionRoot);
   }
+
   private void populateDB(StackDAO stackDao, ExtensionDAO extensionDao) throws AmbariException {
     // for every stack read in, ensure that we have a database entry for it;
     // don't put try/catch logic around this since a failure here will
@@ -227,6 +235,51 @@ public class StackManager {
         extensionDao.create(extensionEntity);
       }
     }
+
+    createLinks();
+  }
+
+  /**
+   * Attempts to automatically create links between extension versions and stack versions.
+   * This is limited to 'active' extensions that have the 'autolink' attribute set (in the metainfo.xml).
+   * Stack versions are selected based on the minimum stack versions that the extension supports.
+   * The extension and stack versions are processed in order of most recent to oldest.
+   * In this manner, the newest extension version will be autolinked before older extension versions.
+   * If a different version of the same extension is already linked to a stack version then that stack version
+   * will be skipped.
+   */
+  private void createLinks() {
+    LOG.info("Creating links");
+    Collection<ExtensionInfo> extensions = getExtensions();
+    Set<String> names = new HashSet<String>();
+    for(ExtensionInfo extension : extensions){
+      names.add(extension.getName());
+    }
+    for(String name : names) {
+      createLinksForExtension(name);
+    }
+  }
+
+  /**
+   * Attempts to automatically create links between versions of a particular extension and stack versions they support.
+   * This is limited to 'active' extensions that have the 'autolink' attribute set (in the metainfo.xml).
+   * Stack versions are selected based on the minimum stack versions that the extension supports.
+   * The extension and stack versions are processed in order of most recent to oldest.
+   * In this manner, the newest extension version will be autolinked before older extension versions.
+   * If a different version of the same extension is already linked to a stack version then that stack version
+   * will be skipped.
+   */
+  private void createLinksForExtension(String name) {
+    Collection<ExtensionInfo> collection = getExtensions(name);
+    List<ExtensionInfo> extensions = new ArrayList<ExtensionInfo>(collection.size());
+    extensions.addAll(collection);
+    try {
+      helper.createExtensionLinks(this, extensions);
+    }
+    catch (AmbariException e) {
+      String msg = String.format("Failed to create link for extension: %s with exception: %s", name, e.getMessage());
+      LOG.error(msg);
+    }
   }
 
   /**
@@ -259,6 +312,24 @@ public class StackManager {
   }
 
   /**
+   * Obtain a map of all stacks by name.
+   *
+   * @return A map of all stacks with the name as the key.
+   */
+  public Map<String, List<StackInfo>> getStacksByName() {
+    Map<String, List<StackInfo>> stacks = new HashMap<String, List<StackInfo>>();
+    for (StackInfo stack: stackMap.values()) {
+      List<StackInfo> list = stacks.get(stack.getName());
+      if (list == null) {
+        list = new ArrayList<StackInfo>();
+        stacks.put(stack.getName(),  list);
+      }
+      list.add(stack);
+    }
+    return stacks;
+  }
+
+  /**
    * Obtain all stacks.
    *
    * @return collection of all stacks
@@ -470,8 +541,6 @@ public class StackManager {
     }
   }
 
-
-
   /**
    * Validate that the specified extension root is a valid directory.
    *
@@ -578,9 +647,11 @@ public class StackManager {
   }
 
   public void linkStackToExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    stack.addExtension(extension);
   }
 
   public void unlinkStackAndExtension(StackInfo stack, ExtensionInfo extension) throws AmbariException {
+    stack.removeExtension(extension);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
index 0313770..34a3047 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
@@ -404,9 +404,7 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
 
   private void addExtensionServices() throws AmbariException {
     for (ExtensionModule extension : extensionModules.values()) {
-      stackInfo.getExtensions().add(extension.getModuleInfo());
-      Collection<ServiceModule> services = extension.getServiceModules().values();
-      addServices(services);
+      stackInfo.addExtension(extension.getModuleInfo());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
index 89a6fb5..c05a466 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ExtensionInfo.java
@@ -30,6 +30,7 @@ import java.util.Set;
 import org.apache.ambari.server.controller.ExtensionVersionResponse;
 import org.apache.ambari.server.stack.Validable;
 import org.apache.ambari.server.state.stack.ExtensionMetainfoXml;
+import org.apache.ambari.server.utils.VersionUtils;
 
 /**
  * An extension version is like a stack version but it contains custom services.  Linking an extension
@@ -45,6 +46,8 @@ public class ExtensionInfo implements Comparable<ExtensionInfo>, Validable{
   private List<ExtensionMetainfoXml.Stack> stacks;
   private List<ExtensionMetainfoXml.Extension> extensions;
   private boolean valid = true;
+  private boolean autoLink = false;
+  private boolean active = false;
 
   /**
    *
@@ -185,9 +188,10 @@ public class ExtensionInfo implements Comparable<ExtensionInfo>, Validable{
 
   @Override
   public int compareTo(ExtensionInfo o) {
-    String myId = name + "-" + version;
-    String oId = o.name + "-" + o.version;
-    return myId.compareTo(oId);
+    if (name.equals(o.name)) {
+      return VersionUtils.compareVersions(version, o.version);
+    }
+    return name.compareTo(o.name);
   }
 
   public List<ExtensionMetainfoXml.Stack> getStacks() {
@@ -205,4 +209,20 @@ public class ExtensionInfo implements Comparable<ExtensionInfo>, Validable{
   public void setExtensions(List<ExtensionMetainfoXml.Extension> extensions) {
     this.extensions = extensions;
   }
+
+  public boolean isAutoLink() {
+    return autoLink;
+  }
+
+  public void setAutoLink(boolean autoLink) {
+    this.autoLink = autoLink;
+  }
+
+  public boolean isActive() {
+    return active;
+  }
+
+  public void setActive(boolean active) {
+    this.active = active;
+  }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
index b7f64f9..1658f1b 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
@@ -36,6 +36,7 @@ import org.apache.ambari.server.state.stack.ConfigUpgradePack;
 import org.apache.ambari.server.state.stack.RepositoryXml;
 import org.apache.ambari.server.state.stack.StackRoleCommandOrder;
 import org.apache.ambari.server.state.stack.UpgradePack;
+import org.apache.ambari.server.utils.VersionUtils;
 
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Multimaps;
@@ -200,6 +201,7 @@ public class StackInfo implements Comparable<StackInfo>, Validable{
   public ExtensionInfo getExtensionByService(String serviceName) {
     Collection<ExtensionInfo> extensions = getExtensions();
     for (ExtensionInfo extension : extensions) {
+      Collection<ServiceInfo> services = extension.getServices();
       for (ServiceInfo service : services) {
         if (service.getName().equals(serviceName))
           return extension;
@@ -209,6 +211,24 @@ public class StackInfo implements Comparable<StackInfo>, Validable{
     return null;
   }
 
+  public void addExtension(ExtensionInfo extension) {
+    Collection<ExtensionInfo> extensions = getExtensions();
+    extensions.add(extension);
+    Collection<ServiceInfo> services = getServices();
+    for (ServiceInfo service : extension.getServices()) {
+      services.add(service);
+    }
+  }
+
+  public void removeExtension(ExtensionInfo extension) {
+    Collection<ExtensionInfo> extensions = getExtensions();
+    extensions.remove(extension);
+    Collection<ServiceInfo> services = getServices();
+    for (ServiceInfo service : extension.getServices()) {
+      services.remove(service);
+    }
+  }
+
   public List<PropertyInfo> getProperties() {
     if (properties == null) properties = new ArrayList<>();
     return properties;
@@ -476,9 +496,10 @@ public class StackInfo implements Comparable<StackInfo>, Validable{
 
   @Override
   public int compareTo(StackInfo o) {
-    String myId = name + "-" + version;
-    String oId = o.name + "-" + o.version;
-    return myId.compareTo(oId);
+    if (name.equals(o.name)) {
+      return VersionUtils.compareVersions(version, o.version);
+    }
+    return name.compareTo(o.name);
   }
 
   //todo: ensure that required properties are never modified...

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
index 790e514..26572e5 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/ExtensionMetainfoXml.java
@@ -77,6 +77,9 @@ public class ExtensionMetainfoXml implements Validable{
   @XmlTransient
   private boolean valid = true;
 
+  @XmlElement(name="auto-link")
+  private boolean autoLink = false;
+
   /**
    *
    * @return valid xml flag
@@ -201,4 +204,12 @@ public class ExtensionMetainfoXml implements Validable{
     }
   }
 
+  public boolean isAutoLink() {
+    return autoLink;
+  }
+
+  public void setAutoLink(boolean autoLink) {
+    this.autoLink = autoLink;
+  }
+
 }
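
The new auto-link flag is read from the extension's metainfo.xml through the @XmlElement(name="auto-link") annotation added above. The snippet below is a minimal, self-contained JAXB sketch of that mapping; the root element and class are simplified stand-ins (not the real ExtensionMetainfoXml structure) and it assumes a JAXB runtime on the classpath, as on the Java 8 builds Ambari targets.

import java.io.StringReader;
import javax.xml.bind.JAXB;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

public class AutoLinkXmlSketch {

  // Simplified stand-in for the metainfo root; only the auto-link element is mapped here.
  @XmlRootElement(name = "metainfo")
  public static class MetainfoSketch {
    @XmlElement(name = "auto-link")
    public boolean autoLink = false; // stays false when the element is absent, matching the default above
  }

  public static void main(String[] args) {
    String xml = "<metainfo><auto-link>true</auto-link></metainfo>";
    MetainfoSketch parsed = JAXB.unmarshal(new StringReader(xml), MetainfoSketch.class);
    System.out.println("autoLink = " + parsed.autoLink); // prints: autoLink = true
  }
}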

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
index 6503e7f..09a934e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerCommonServicesTest.java
@@ -35,6 +35,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -123,10 +124,11 @@ public class StackManagerCommonServicesTest {
     osFamily = new OsFamily(config);
 
     replay(metaInfoDao, actionMetadata);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(new File(stackRoot), new File(
         commonServicesRoot), new File(extensionRoot), osFamily, true, metaInfoDao,
-        actionMetadata, stackDao, extensionDao, linkDao);
+        actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     EasyMock.verify( config, stackDao );
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
index 8165398..4ae52c0 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerExtensionTest.java
@@ -34,6 +34,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -63,42 +64,57 @@ public class StackManagerExtensionTest  {
     ExtensionLinkDAO linkDao = createNiceMock(ExtensionLinkDAO.class);
     ActionMetadata actionMetadata = createNiceMock(ActionMetadata.class);
     OsFamily osFamily = createNiceMock(OsFamily.class);
-    StackEntity stackEntity = createNiceMock(StackEntity.class);
-    ExtensionEntity extensionEntity = createNiceMock(ExtensionEntity.class);
-    ExtensionLinkEntity linkEntity = createNiceMock(ExtensionLinkEntity.class);
-    List<ExtensionLinkEntity> list = new ArrayList<ExtensionLinkEntity>();
-    list.add(linkEntity);
-
-    expect(
-        stackDao.find(EasyMock.anyObject(String.class),
-            EasyMock.anyObject(String.class))).andReturn(stackEntity).atLeastOnce();
-
-    expect(
-        extensionDao.find(EasyMock.anyObject(String.class),
-            EasyMock.anyObject(String.class))).andReturn(extensionEntity).atLeastOnce();
-
-    expect(
-        linkDao.findByStack(EasyMock.anyObject(String.class),
+    StackEntity stack1 = new StackEntity();
+    stack1.setStackName("HDP");
+    stack1.setStackVersion("0.1");
+    StackEntity stack2 = new StackEntity();
+    stack2.setStackName("HDP");
+    stack2.setStackVersion("0.2");
+    StackEntity stack3 = new StackEntity();
+    stack3.setStackName("HDP");
+    stack3.setStackVersion("0.3");
+    ExtensionEntity extension1 = new ExtensionEntity();
+    extension1.setExtensionName("EXT");
+    extension1.setExtensionVersion("0.1");
+    ExtensionEntity extension2 = new ExtensionEntity();
+    extension2.setExtensionName("EXT");
+    extension2.setExtensionVersion("0.2");
+    ExtensionEntity extension3 = new ExtensionEntity();
+    extension3.setExtensionName("EXT");
+    extension3.setExtensionVersion("0.3");
+    List<ExtensionLinkEntity> list = new ArrayList<>();
+
+    expect(stackDao.find("HDP", "0.1")).andReturn(stack1).atLeastOnce();
+    expect(stackDao.find("HDP", "0.2")).andReturn(stack2).atLeastOnce();
+    expect(stackDao.find("HDP", "0.3")).andReturn(stack3).atLeastOnce();
+    expect(extensionDao.find("EXT", "0.1")).andReturn(extension1).atLeastOnce();
+    expect(extensionDao.find("EXT", "0.2")).andReturn(extension2).atLeastOnce();
+    expect(extensionDao.find("EXT", "0.3")).andReturn(extension3).atLeastOnce();
+
+    expect(linkDao.findByStack(EasyMock.anyObject(String.class),
             EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
 
-    expect(
-        linkEntity.getExtension()).andReturn(extensionEntity).atLeastOnce();
+    expect(linkDao.findByStackAndExtension("HDP", "0.2", "EXT", "0.2")).andReturn(null).atLeastOnce();
 
-    expect(
-        extensionEntity.getExtensionName()).andReturn("EXT").atLeastOnce();
-
-    expect(
-        extensionEntity.getExtensionVersion()).andReturn("0.2").atLeastOnce();
-
-    replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao, extensionEntity, linkEntity);
+    replay(actionMetadata, stackDao, metaInfoDao, osFamily, extensionDao, linkDao); //linkEntity
 
     String stacks = ClassLoader.getSystemClassLoader().getResource("stacks_with_extensions").getPath();
     String common = ClassLoader.getSystemClassLoader().getResource("common-services").getPath();
     String extensions = ClassLoader.getSystemClassLoader().getResource("extensions").getPath();
 
-    StackManager stackManager = new StackManager(new File(stacks),
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
+
+    StackManager stackManager = null;
+    try {
+      stackManager = new StackManager(new File(stacks),
         new File(common), new File(extensions), osFamily, false,
-        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
+    }
+    catch (Exception e) {
+      e.printStackTrace();
+    }
+
+    assertNotNull("Failed to create Stack Manager", stackManager);
 
     ExtensionInfo extension = stackManager.getExtension("EXT", "0.1");
     assertNull("EXT 0.1's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
@@ -123,6 +139,7 @@ public class StackManagerExtensionTest  {
     assertNotNull("EXT 0.2's parent: " + extension.getParentExtensionVersion(), extension.getParentExtensionVersion());
     assertEquals("EXT 0.2's parent: " + extension.getParentExtensionVersion(), "0.1", extension.getParentExtensionVersion());
     assertNotNull(extension.getService("OOZIE2"));
+    assertTrue("Extension is not set to auto link", extension.isAutoLink());
     oozie = extension.getService("OOZIE2");
     assertNotNull("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder());
     assertTrue("Package dir is " + oozie.getServicePackageFolder(), oozie.getServicePackageFolder().contains("extensions/EXT/0.1/services/OOZIE2/package"));
@@ -147,7 +164,13 @@ public class StackManagerExtensionTest  {
     assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
     extension = stack.getExtensions().iterator().next();
     assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
-    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.2");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
+
+    stack = stackManager.getStack("HDP", "0.3");
+    assertTrue("Extensions found: " + stack.getExtensions().size(), stack.getExtensions().size() == 1);
+    extension = stack.getExtensions().iterator().next();
+    assertEquals("Extension name: " + extension.getName(), extension.getName(), "EXT");
+    assertEquals("Extension version: " + extension.getVersion(), extension.getVersion(), "0.3");
   }
 
 }

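The new assertions above lean on the addExtension/getExtensionByService methods introduced in StackInfo earlier in this patch: once an auto-link extension is attached to a stack, its services resolve through that stack. A minimal hedged fragment of that relationship, reusing names from the diffs above (the stackManager variable is the one built in the test, and the actual linking is performed inside StackManager/AmbariManagementHelper, which is assumed here rather than shown):

    // Hypothetical sketch, not part of the patch.
    StackInfo stack = stackManager.getStack("HDP", "0.3");
    ExtensionInfo ext = stackManager.getExtension("EXT", "0.3");
    if (ext.isAutoLink()) {
      stack.addExtension(ext); // also copies the extension's services (e.g. OOZIE2) into the stack
    }
    System.out.println(stack.getExtensionByService("OOZIE2") == ext); // true once linked
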
http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
index ca24cd9..6df46c3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMiscTest.java
@@ -32,6 +32,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -69,12 +70,13 @@ public class StackManagerMiscTest  {
             EasyMock.anyObject(String.class))).andReturn(list).atLeastOnce();
 
     replay(actionMetadata, stackDao, extensionDao, linkDao, metaInfoDao, osFamily);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     try {
       String stacksCycle1 = ClassLoader.getSystemClassLoader().getResource("stacks_with_cycle").getPath();
 
       StackManager stackManager = new StackManager(new File(stacksCycle1), null, null, osFamily, false,
-          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
       fail("Expected exception due to cyclic stack");
     } catch (AmbariException e) {
@@ -86,7 +88,7 @@ public class StackManagerMiscTest  {
           "stacks_with_cycle2").getPath();
 
       StackManager stackManager = new StackManager(new File(stacksCycle2),
-          null, null, osFamily, true, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+          null, null, osFamily, true, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
       fail("Expected exception due to cyclic stack");
     } catch (AmbariException e) {
@@ -124,10 +126,11 @@ public class StackManagerMiscTest  {
     replay(metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata, osFamily);
 
     String singleStack = ClassLoader.getSystemClassLoader().getResource("single_stack").getPath();
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(new File(singleStack.replace(
         StackManager.PATH_DELIMITER, File.separator)), null, null, osFamily, false, metaInfoDao,
-        actionMetadata, stackDao, extensionDao, linkDao);
+        actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     Collection<StackInfo> stacks = stackManager.getStacks();
     assertEquals(1, stacks.size());
@@ -161,11 +164,13 @@ public class StackManagerMiscTest  {
 
     replay(metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata, osFamily);
 
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
+
     try {
       String upgradeCycle = ClassLoader.getSystemClassLoader().getResource("stacks_with_upgrade_cycle").getPath();
 
       StackManager stackManager = new StackManager(new File(upgradeCycle), null, null, osFamily, false,
-          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+          metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
       fail("Expected exception due to cyclic service upgrade xml");
     } catch (AmbariException e) {

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
index 1b9e15f..4e7d040 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerMock.java
@@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReentrantLock;
 import javax.annotation.Nullable;
 
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -133,8 +134,8 @@ public class StackManagerMock extends StackManager {
       File commonServicesRoot, @Assisted("extensionRoot") @Nullable File extensionRoot,
                           @Assisted OsFamily osFamily, @Assisted boolean validate, MetainfoDAO metaInfoDAO,
                           ActionMetadata actionMetadata, StackDAO stackDao, ExtensionDAO extensionDao,
-                          ExtensionLinkDAO linkDao) throws AmbariException {
-    super(stackRoot, commonServicesRoot, extensionRoot, osFamily, validate, metaInfoDAO, actionMetadata, stackDao, extensionDao, linkDao);
+                          ExtensionLinkDAO linkDao, AmbariManagementHelper helper) throws AmbariException {
+    super(stackRoot, commonServicesRoot, extensionRoot, osFamily, validate, metaInfoDAO, actionMetadata, stackDao, extensionDao, linkDao, helper);
     currentStackRoot = stackRoot;
     currentCommonServicesRoot = commonServicesRoot;
     currentExtensionRoot = extensionRoot;

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
index 090bf55..74a8f29 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/stack/StackManagerTest.java
@@ -44,6 +44,7 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.AmbariManagementHelper;
 import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.orm.dao.ExtensionDAO;
 import org.apache.ambari.server.orm.dao.ExtensionLinkDAO;
@@ -119,9 +120,10 @@ public class StackManagerTest {
     replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     osFamily = new OsFamily(config);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(new File(stackRoot), null, null, osFamily, false,
-        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+        metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     verify(config, metaInfoDao, stackDao, actionMetadata);
 
@@ -778,9 +780,10 @@ public class StackManagerTest {
     replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(stackRoot, commonServices, extensions,
-            osFamily, false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+            osFamily, false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     for (StackInfo stackInfo : stackManager.getStacks()) {
       for (ServiceInfo serviceInfo : stackInfo.getServices()) {
@@ -843,9 +846,10 @@ public class StackManagerTest {
     replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(stackRoot, commonServices, extensions, osFamily,
-        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     String rangerUserSyncRoleCommand = Role.RANGER_USERSYNC + "-" + RoleCommand.START;
     String rangerAdminRoleCommand = Role.RANGER_ADMIN + "-" + RoleCommand.START;
@@ -972,9 +976,10 @@ public class StackManagerTest {
     replay(config, metaInfoDao, stackDao, extensionDao, linkDao, actionMetadata);
 
     OsFamily osFamily = new OsFamily(config);
+    AmbariManagementHelper helper = new AmbariManagementHelper(stackDao, extensionDao, linkDao);
 
     StackManager stackManager = new StackManager(stackRoot, commonServices, extensions, osFamily,
-        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao);
+        false, metaInfoDao, actionMetadata, stackDao, extensionDao, linkDao, helper);
 
     String zookeeperServerRoleCommand = Role.ZOOKEEPER_SERVER + "-" + RoleCommand.START;
     String logsearchServerRoleCommand = Role.LOGSEARCH_SERVER + "-" + RoleCommand.START;

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
index 77a832c..27f5902 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.1/metainfo.xml
@@ -23,7 +23,7 @@
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.1.*</version>
+        <version>0.1</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
index 04f733c..0d37b3e 100644
--- a/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
+++ b/ambari-server/src/test/resources/extensions/EXT/0.2/metainfo.xml
@@ -20,11 +20,12 @@
     <active>true</active>
   </versions>
   <extends>0.1</extends>
+  <auto-link>true</auto-link>
   <prerequisites>
     <min-stack-versions>
       <stack>
         <name>HDP</name>
-        <version>0.2.*</version>
+        <version>0.2</version>
       </stack>
     </min-stack-versions>
   </prerequisites>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
new file mode 100644
index 0000000..d827314
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/metainfo.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <versions>
+    <active>true</active>
+  </versions>
+  <extends>0.2</extends>
+  <auto-link>true</auto-link>
+  <prerequisites>
+    <min-stack-versions>
+      <stack>
+        <name>HDP</name>
+        <version>0.2</version>
+      </stack>
+    </min-stack-versions>
+  </prerequisites>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml
new file mode 100644
index 0000000..9176551
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/metainfo.xml
@@ -0,0 +1,118 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE2</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
+      <version>4.0.0</version>
+
+      <components>
+        <component>
+          <name>OOZIE2_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE2_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>oozie2.noarch</name>
+            </package>
+            <package>
+              <name>oozie2-client.noarch</name>
+            </package>
+            <package>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie2-site</config-type>
+      </configuration-dependencies>
+
+      <themes>
+        <theme>
+          <fileName>broken_theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json
new file mode 100644
index 0000000..6e8b5bf
--- /dev/null
+++ b/ambari-server/src/test/resources/extensions/EXT/0.3/services/OOZIE2/themes/broken_theme.json
@@ -0,0 +1,3 @@
+{
+  "configuration": {
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml
new file mode 100644
index 0000000..b52857b
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <upgrade>0.2</upgrade>
+    </versions>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml
new file mode 100644
index 0000000..9b3b1c7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/repos/repoinfo.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os family="redhat6">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos6</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos6</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+  <os family="centos5">
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.1.1.16/repos/centos5</baseurl>
+      <repoid>HDP-1.1.1.16</repoid>
+      <reponame>HDP</reponame>
+      <unique>true</unique>
+    </repo>
+    <repo>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/centos5</baseurl>
+      <repoid>HDP-UTILS-1.1.0.15</repoid>
+      <reponame>HDP-UTILS</reponame>
+      <unique>false</unique>
+      <mirrorslist></mirrorslist>
+    </repo>
+    <repo>
+      <baseurl></baseurl>
+      <repoid>epel</repoid>
+      <reponame>epel</reponame>
+      <unique>true</unique>
+      <mirrorslist><![CDATA[https://mirrors.fedoraproject.org/metalink?repo=epel-5&arch=$basearch]]></mirrorslist>
+    </repo>
+  </os>
+</reposinfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..48123f0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HBASE/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <extends>common-services/HBASE/1.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..bcab577
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/global.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>640</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>KeyTab Directory.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/7c56924a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..8fb8c7f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks_with_extensions/HDP/0.3/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,223 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignores failures on users and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
+</configuration>


[10/16] ambari git commit: AMBARI-22510. Handle new error type from Stack Advisor (akovalenko)

Posted by ao...@apache.org.
AMBARI-22510. Handle new error type from Stack Advisor (akovalenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0e2e711e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0e2e711e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0e2e711e

Branch: refs/heads/branch-feature-AMBARI-22457
Commit: 0e2e711e0616ea00e8d7bac1f375977e019e6e77
Parents: 54bc2a2
Author: Aleksandr Kovalenko <ak...@hortonworks.com>
Authored: Thu Nov 23 19:52:26 2017 +0200
Committer: Aleksandr Kovalenko <ak...@hortonworks.com>
Committed: Fri Nov 24 17:58:31 2017 +0200

----------------------------------------------------------------------
 ambari-web/app/messages.js                      |   5 +-
 ambari-web/app/mixins/common/serverValidator.js |  41 +++++---
 ambari-web/app/styles/application.less          |   3 +-
 .../config_recommendation_popup.hbs             | 102 +++++++++++++------
 .../config_validation_popup.js                  |   7 +-
 .../test/mixins/common/serverValidator_test.js  |  15 +--
 6 files changed, 112 insertions(+), 61 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0e2e711e/ambari-web/app/messages.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js
index 9b22208..783baf1 100644
--- a/ambari-web/app/messages.js
+++ b/ambari-web/app/messages.js
@@ -352,6 +352,7 @@ Em.I18n.translations = {
   'common.repositoryType': 'Repository Type',
   'common.rolling.downgrade': 'Rolling Downgrade',
   'common.express.downgrade': 'Express Downgrade',
+  'common.critical.error': 'Critical',
 
   'models.alert_instance.tiggered.verbose': "Occurred on {0} <br> Checked on {1}",
   'models.alert_definition.triggered.verbose': "Occurred on {0}",
@@ -932,8 +933,8 @@ Em.I18n.translations = {
   'installer.step7.popup.validation.failed.body': 'Some services are not properly configured. You have to change the highlighted configs according to the recommended values.',
   'installer.step7.popup.validation.request.failed.body': 'The configuration changes could not be validated for consistency due to an unknown error.  Your changes have not been saved yet.  Would you like to proceed and save the changes?',
   'installer.step7.popup.validation.warning.header': 'Configurations',
-  'installer.step7.popup.validation.warning.body': 'Some service configurations are not configured properly. We recommend you review and change the highlighted configuration values. Are you sure you want to proceed without correcting configurations?',
-  'installer.step7.popup.validation.error.body': 'Service configurations resulted in validation errors. Please address them before proceeding.',
+  'installer.step7.popup.validation.issues.body': 'The following configuration changes are highly recommended, but can be skipped.',
+  'installer.step7.popup.validation.criticalIssues.body': 'You must correct the following critical issues before proceeding:',
   'installer.step7.popup.oozie.derby.warning': 'Derby is not recommended for production use. With Derby, Oozie Server HA and concurrent connection support will not be available.',
   'installer.step7.oozie.database.new': 'New Derby Database',
   'installer.step7.hive.database.new.mysql': 'New MySQL Database',

http://git-wip-us.apache.org/repos/asf/ambari/blob/0e2e711e/ambari-web/app/mixins/common/serverValidator.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/serverValidator.js b/ambari-web/app/mixins/common/serverValidator.js
index 955549f..ff5319d 100644
--- a/ambari-web/app/mixins/common/serverValidator.js
+++ b/ambari-web/app/mixins/common/serverValidator.js
@@ -42,7 +42,10 @@ App.ServerValidatorMixin = Em.Mixin.create({
    *
    * @type {Object[]}
    */
-  configErrorList: [],
+  configErrorList: Em.Object.create({
+    issues: [],
+    criticalIssues: []
+  }),
 
   /**
    * Map with allowed error types
@@ -50,6 +53,7 @@ App.ServerValidatorMixin = Em.Mixin.create({
    * @type {Object}
    */
   errorTypes: {
+    CRITICAL_ERROR: 'NOT_APPLICABLE',
     ERROR: 'ERROR',
     WARN: 'WARN',
     GENERAL: 'GENERAL'
@@ -98,10 +102,13 @@ App.ServerValidatorMixin = Em.Mixin.create({
       self = this,
       primary = function() { deferred.resolve(); },
       secondary = function() { deferred.reject('invalid_configs'); };
-    this.set('configErrorList', []);
+    this.set('configErrorList', Em.Object.create({
+      issues: [],
+      criticalIssues: []
+    }));
 
     this.runServerSideValidation().done(function() {
-      if (self.get('configErrorList.length')) {
+      if (self.get('configErrorList.issues.length') || self.get('configErrorList.criticalIssues.length')) {
         App.showConfigValidationPopup(self.get('configErrorList'), primary, secondary);
       } else {
         deferred.resolve();
@@ -178,13 +185,14 @@ App.ServerValidatorMixin = Em.Mixin.create({
     var errorTypes = this.get('errorTypes');
     var error = {
       type: type,
+      isCriticalError: type === errorTypes.CRITICAL_ERROR,
       isError: type === errorTypes.ERROR,
       isWarn: type === errorTypes.WARN,
       isGeneral: type === errorTypes.GENERAL,
       messages: Em.makeArray(messages)
     };
 
-    Em.assert('Unknown config error type ' + type, error.isError || error.isWarn || error.isGeneral);
+    Em.assert('Unknown config error type ' + type, error.isError || error.isWarn || error.isGeneral || error.isCriticalError);
     if (property) {
       error.id = Em.get(property, 'id');
       error.serviceName = Em.get(property, 'serviceDisplayName') || App.StackService.find(Em.get(property, 'serviceName')).get('displayName');
@@ -248,29 +256,33 @@ App.ServerValidatorMixin = Em.Mixin.create({
    */
   collectAllIssues: function(configErrorsMap, generalErrors)  {
     var errorTypes = this.get('errorTypes');
-    var configErrorList = [];
+    var configErrorList = {};
+    configErrorList[errorTypes.WARN] = [];
+    configErrorList[errorTypes.ERROR] = [];
+    configErrorList[errorTypes.CRITICAL_ERROR] = [];
+    configErrorList[errorTypes.GENERAL] = [];
 
     this.get('stepConfigs').forEach(function(service) {
       service.get('configs').forEach(function(property) {
         if (property.get('isVisible') && !property.get('hiddenBySection')) {
           var serverIssue = configErrorsMap[property.get('id')];
           if (serverIssue) {
-            configErrorList.push(this.createErrorMessage(serverIssue.type, property, serverIssue.messages));
+            configErrorList[serverIssue.type].push(this.createErrorMessage(serverIssue.type, property, serverIssue.messages));
           } else if (property.get('warnMessage')) {
-            configErrorList.push(this.createErrorMessage(errorTypes.WARN, property, [property.get('warnMessage')]));
+            configErrorList[errorTypes.WARN].push(this.createErrorMessage(errorTypes.WARN, property, [property.get('warnMessage')]));
           }
         }
       }, this);
     }, this);
 
     generalErrors.forEach(function(serverIssue) {
-      configErrorList.push(this.createErrorMessage(errorTypes.GENERAL, null, serverIssue.messages));
+      configErrorList[errorTypes.GENERAL].push(this.createErrorMessage(errorTypes.GENERAL, null, serverIssue.messages));
     }, this);
 
     Em.keys(configErrorsMap).forEach(function (id) {
-      if (!configErrorList.someProperty('id', id)) {
-        var serverIssue = configErrorsMap[id],
-          filename = Em.get(serverIssue, 'filename'),
+      var serverIssue = configErrorsMap[id];
+      if (!configErrorList[serverIssue.type].someProperty('id', id)) {
+        var filename = Em.get(serverIssue, 'filename'),
           service = App.config.get('serviceByConfigTypeMap')[filename],
           property = {
             id: id,
@@ -278,11 +290,14 @@ App.ServerValidatorMixin = Em.Mixin.create({
             filename: App.config.getOriginalFileName(filename),
             serviceDisplayName: service && Em.get(service, 'displayName')
           };
-        configErrorList.push(this.createErrorMessage(serverIssue.type, property, serverIssue.messages));
+        configErrorList[serverIssue.type].push(this.createErrorMessage(serverIssue.type, property, serverIssue.messages));
       }
     }, this);
 
-    return configErrorList;
+    return Em.Object.create({
+      criticalIssues: configErrorList[errorTypes.CRITICAL_ERROR],
+      issues: configErrorList[errorTypes.ERROR].concat(configErrorList[errorTypes.WARN], configErrorList[errorTypes.GENERAL])
+    });
   },
 
   /**

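For context, the serverValidator.js hunks above rework collectAllIssues so that validation results are bucketed by severity (CRITICAL_ERROR, ERROR, WARN, GENERAL) and returned as an Em.Object with two arrays: criticalIssues and issues. A minimal sketch of how a caller might branch on that shape; only collectAllIssues and the App.showConfigValidationPopup(configErrors, primary, secondary) signature come from this commit, while the proceed/goBack callbacks are hypothetical placeholders:

    // hypothetical wizard-step callbacks, not part of this commit
    var proceed = function () { /* continue to the next wizard step */ };
    var goBack  = function () { /* return to the configs page */ };

    var result = this.collectAllIssues(configErrorsMap, generalErrors);

    if (result.get('criticalIssues.length')) {
      // critical issues block the wizard; the popup's primary button stays disabled
      App.showConfigValidationPopup(result, Em.K, Em.K);
    } else if (result.get('issues.length')) {
      // plain errors, warnings and general messages can be reviewed and overridden
      App.showConfigValidationPopup(result, proceed, goBack);
    } else {
      proceed();
    }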
http://git-wip-us.apache.org/repos/asf/ambari/blob/0e2e711e/ambari-web/app/styles/application.less
----------------------------------------------------------------------
diff --git a/ambari-web/app/styles/application.less b/ambari-web/app/styles/application.less
index 4c1b002..77f379c 100644
--- a/ambari-web/app/styles/application.less
+++ b/ambari-web/app/styles/application.less
@@ -5973,7 +5973,7 @@ input[type="radio"].align-checkbox, input[type="checkbox"].align-checkbox {
 .table td.error { background-color: @error-background; }
 .table td.warning { background-color: @warning-background; }
 
-#config-validation-warnings {
+.config-validation-warnings {
   table {
     tbody{
       tr {
@@ -5997,7 +5997,6 @@ input[type="radio"].align-checkbox, input[type="checkbox"].align-checkbox {
   }
 }
 
-
 @config-dependency-t-name-width: 180px;
 @config-dependency-t-service-width: 100px;
 @config-dependency-t-group-width: 140px;

http://git-wip-us.apache.org/repos/asf/ambari/blob/0e2e711e/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs b/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
index 8100dff..e9ef3b7 100644
--- a/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
+++ b/ambari-web/app/templates/common/modal_popups/config_recommendation_popup.hbs
@@ -16,34 +16,25 @@
 * limitations under the License.
 }}
 
-<p>{{view.messageBody}}</p>
-<div id="config-validation-warnings" class="limited-height-2">
-  <table class="table no-borders">
-    <thead>
-    <tr>
-      <th>{{t common.type}}</th>
-      <th>{{t common.service}}</th>
-      <th>{{t common.property}}</th>
-      <th>{{t common.value}}</th>
-      <th>{{t common.description}}</th>
-    </tr>
-    </thead>
-    <tbody>
-    {{#each error in view.configErrors}}
-      <tr {{bindAttr class="error.isError:error:warning"}}>
-        <td>
-          {{#if error.isError}}
-            {{t common.error}}
-          {{else}}
-            {{t common.warning}}
-          {{/if}}
-        </td>
-
-        {{#if error.isGeneral}}
-          {{#each message in error.messages}}
-            <td colspan="4">{{error.message}}</td>
-          {{/each}}
-        {{else}}
+{{#if view.configErrors.criticalIssues.length}}
+  <p>{{t installer.step7.popup.validation.criticalIssues.body}}</p>
+  <div class="limited-height-2 config-validation-warnings">
+    <table class="table no-borders">
+      <thead>
+      <tr>
+        <th>{{t common.type}}</th>
+        <th>{{t common.service}}</th>
+        <th>{{t common.property}}</th>
+        <th>{{t common.value}}</th>
+        <th>{{t common.description}}</th>
+      </tr>
+      </thead>
+      <tbody>
+      {{#each error in view.configErrors.criticalIssues}}
+        <tr class="error">
+          <td>
+            {{t common.critical}}
+          </td>
           <td>{{error.serviceName}}</td>
           <td>{{error.propertyName}}</td>
           <td>{{error.value}}</td>
@@ -53,9 +44,54 @@
             {{/each}}
             <div class="property-description">{{error.description}}</div>
           </td>
-        {{/if}}
+        </tr>
+      {{/each}}
+      </tbody>
+    </table>
+  </div>
+{{/if}}
+{{#if view.configErrors.issues.length}}
+  <p>{{t installer.step7.popup.validation.issues.body}}</p>
+  <div class="limited-height-2 config-validation-warnings">
+    <table class="table no-borders">
+      <thead>
+      <tr>
+        <th>{{t common.type}}</th>
+        <th>{{t common.service}}</th>
+        <th>{{t common.property}}</th>
+        <th>{{t common.value}}</th>
+        <th>{{t common.description}}</th>
       </tr>
-    {{/each}}
-    </tbody>
-  </table>
-</div>
+      </thead>
+      <tbody>
+      {{#each error in view.configErrors.issues}}
+        <tr {{bindAttr class="error.isError:error:warning"}}>
+          <td>
+            {{#if error.isError}}
+              {{t common.error}}
+            {{else}}
+              {{t common.warning}}
+            {{/if}}
+          </td>
+
+          {{#if error.isGeneral}}
+            {{#each message in error.messages}}
+              <td colspan="4">{{error.message}}</td>
+            {{/each}}
+          {{else}}
+            <td>{{error.serviceName}}</td>
+            <td>{{error.propertyName}}</td>
+            <td>{{error.value}}</td>
+            <td>
+              {{#each message in error.messages}}
+                <div class="property-message">{{message}}</div>
+              {{/each}}
+              <div class="property-description">{{error.description}}</div>
+            </td>
+          {{/if}}
+        </tr>
+      {{/each}}
+      </tbody>
+    </table>
+  </div>
+{{/if}}

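The reworked template renders two tables from the grouped object: one for criticalIssues (always shown as errors) and one for the remaining issues (errors, warnings and general messages). A purely illustrative sketch of the configErrors shape the template binds to; the service, property and message values below are made-up examples, and only the criticalIssues/issues keys plus the per-error fields (serviceName, propertyName, value, messages, description, isError/isWarn/isGeneral) come from the diffs above:

    var configErrors = Em.Object.create({
      criticalIssues: [{
        type: 'CRITICAL_ERROR',
        serviceName: 'HDFS',                          // hypothetical example values
        propertyName: 'dfs.datanode.data.dir',
        value: '',
        messages: ['Value is required'],
        description: 'DataNode directories'
      }],
      issues: [{
        type: 'WARN', isError: false, isWarn: true, isGeneral: false,
        serviceName: 'YARN',
        propertyName: 'yarn.nodemanager.resource.memory-mb',
        value: '1024',
        messages: ['Value is below the recommended minimum'],
        description: 'Memory available for containers'
      }]
    });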
http://git-wip-us.apache.org/repos/asf/ambari/blob/0e2e711e/ambari-web/app/views/common/modal_popups/config_validation/config_validation_popup.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/common/modal_popups/config_validation/config_validation_popup.js b/ambari-web/app/views/common/modal_popups/config_validation/config_validation_popup.js
index 98e1ce5..76f5701 100644
--- a/ambari-web/app/views/common/modal_popups/config_validation/config_validation_popup.js
+++ b/ambari-web/app/views/common/modal_popups/config_validation/config_validation_popup.js
@@ -42,13 +42,10 @@ App.showConfigValidationPopup = function (configErrors, primary, secondary) {
       this._super();
       secondary();
     },
+    disablePrimary: !!configErrors.get('criticalIssues.length'),
     bodyClass: Em.View.extend({
       templateName: require('templates/common/modal_popups/config_recommendation_popup'),
-      configErrors: configErrors,
-      configValidationError: Em.computed.someBy('configErrors', 'isError', true),
-      messageBody: Em.I18n.t(this.get('configValidationError')
-        ? 'installer.step7.popup.validation.error.body'
-        : 'installer.step7.popup.validation.warning.body')
+      configErrors: configErrors
     })
   });
 };

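With the popup change above, disablePrimary keeps the primary (proceed) button greyed out whenever criticalIssues is non-empty, so critical validation failures cannot be dismissed. A short usage sketch, assuming the configErrors object from the previous sketch; the callback bodies are placeholders rather than code from this commit:

    App.showConfigValidationPopup(
      configErrors,                                      // Em.Object with criticalIssues / issues
      function () { /* primary: user proceeds despite warnings */ },
      function () { /* secondary: user goes back to fix the configuration */ }
    );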
http://git-wip-us.apache.org/repos/asf/ambari/blob/0e2e711e/ambari-web/test/mixins/common/serverValidator_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/mixins/common/serverValidator_test.js b/ambari-web/test/mixins/common/serverValidator_test.js
index cdd69fb..4a12a69 100644
--- a/ambari-web/test/mixins/common/serverValidator_test.js
+++ b/ambari-web/test/mixins/common/serverValidator_test.js
@@ -94,35 +94,35 @@ describe('App.ServerValidatorMixin', function () {
     });
 
     it('should add server warnings', function () {
-      var error = result.find(function(r) { return r.propertyName === 'c1' && r.filename === 'f1'; });
+      var error = result.issues.find(function(r) { return r.propertyName === 'c1' && r.filename === 'f1'; });
       expect(error.type).to.equal('WARN');
       expect(error.messages).to.eql(['warn1']);
     });
 
     it('should add server errors', function () {
-      var error = result.find(function(r) { return r.propertyName === 'c2' && r.filename === 'f2'; });
+      var error = result.issues.find(function(r) { return r.propertyName === 'c2' && r.filename === 'f2'; });
       expect(error.type).to.equal('ERROR');
       expect(error.messages).to.eql(['error2']);
     });
 
     it('should add ui warning', function () {
-      var error = result.find(function(r) { return r.propertyName === 'c3' && r.filename === 'f3'; });
+      var error = result.issues.find(function(r) { return r.propertyName === 'c3' && r.filename === 'f3'; });
       expect(error.type).to.equal('WARN');
       expect(error.messages).to.eql(['warn3']);
     });
 
     it('should add general issues', function () {
-      var error = result.findProperty('type', 'GENERAL');
+      var error = result.issues.findProperty('type', 'GENERAL');
       expect(error.messages).to.eql(['general issue']);
     });
 
     it('should ignore issues for hidden configs', function () {
-      var error = result.find(function(r) { return r.propertyName === 'c4' && r.filename === 'f4'; });
+      var error = result.issues.find(function(r) { return r.propertyName === 'c4' && r.filename === 'f4'; });
       expect(error).to.be.undefined;
     });
 
     it('should add issues for deleted properties', function () {
-      var error = result.find(function(r) { return r.id === 'c5_f5'; });
+      var error = result.issues.find(function(r) { return r.id === 'c5_f5'; });
       expect(error.messages).to.eql(['error5']);
     });
   });
@@ -150,6 +150,7 @@ describe('App.ServerValidatorMixin', function () {
     it('creates warn object', function() {
       expect(instanceObject.createErrorMessage('WARN', property, ['msg1'])).to.eql({
         type: 'WARN',
+        isCriticalError: false,
         isError: false,
         isWarn: true,
         isGeneral: false,
@@ -166,6 +167,7 @@ describe('App.ServerValidatorMixin', function () {
     it('creates error object', function() {
       expect(instanceObject.createErrorMessage('ERROR', $.extend({}, property, {serviceDisplayName: 'S Name'}), ['msg2'])).to.eql({
         type: 'ERROR',
+        isCriticalError: false,
         isError: true,
         isWarn: false,
         isGeneral: false,
@@ -182,6 +184,7 @@ describe('App.ServerValidatorMixin', function () {
     it('creates general issue object', function() {
       expect(instanceObject.createErrorMessage('GENERAL', null, ['msg3'])).to.eql({
         type: 'GENERAL',
+        isCriticalError: false,
         isError: false,
         isWarn: false,
         isGeneral: true,