Posted to commits@ambari.apache.org by jl...@apache.org on 2016/03/10 00:49:53 UTC

[01/51] [abbrv] ambari git commit: AMBARI-15305. Move Hive Server Interactive related files from HDP 2.4 stack to HDP 2.6 (Swapan Shridhar via alejandro) [Forced Update!]

Repository: ambari
Updated Branches:
  refs/heads/AMBARI-13364 ec88341d1 -> 7d862f588 (forced update)


http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
new file mode 100644
index 0000000..e77b379
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-site.xml
@@ -0,0 +1,2053 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+  <property>
+    <name>hive.cbo.enable</name>
+    <value>true</value>
+    <description>Flag to control enabling Cost Based Optimizations using the Calcite framework.</description>
+    <display-name>Enable Cost Based Optimizer</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.quorum</name>
+    <value>localhost:2181</value>
+    <description>List of ZooKeeper servers to talk to. This is needed for: 1.
+      Read/write locks - when hive.lock.manager is set to
+      org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager,
+      2. When HiveServer2 supports service discovery via ZooKeeper.</description>
+    <value-attributes>
+      <type>multiLine</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+  </property>
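+  <!-- Illustrative only (hypothetical hostnames): a three-node ZooKeeper ensemble
+       would be listed as a comma-separated set of host:port pairs, e.g.
+         <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value> -->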
+
+  <property>
+    <name>hive.metastore.connect.retries</name>
+    <value>24</value>
+    <description>Number of retries while opening a connection to metastore</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.failure.retries</name>
+    <value>24</value>
+    <description>Number of retries upon failure of Thrift metastore calls</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.connect.retry.delay</name>
+    <value>5s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Number of seconds for the client to wait between consecutive connection attempts
+    </description>
+  </property>
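+  <!-- Illustrative only: values accept a unit suffix as described above, e.g. 5s, 500ms, or 2m;
+       a bare number such as 5 is interpreted as seconds. -->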
+
+  <property>
+    <name>hive.heapsize</name>
+    <value>1024</value>
+    <display-name>HiveServer2 heap size</display-name>
+    <deleted>true</deleted>
+    <description>Hive Java heap size</description>
+    <value-attributes>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <display-name>Database Name</display-name>
+    <description>Database name used by the Hive Metastore</description>
+    <value-attributes>
+      <type>database</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <display-name>JDBC Driver Class</display-name>
+    <description>Driver class name for a JDBC metastore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <name>hive_database</name>
+        <type>hive-env</type>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <display-name>Database Username</display-name>
+    <description>username to use against metastore database</description>
+    <value-attributes>
+      <type>db_user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property require-input="true">
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Database Password</display-name>
+    <description>password to use against metastore database</description>
+    <value-attributes>
+      <type>password</type>
+      <overridable>false</overridable>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
+    <display-name>Database URL</display-name>
+    <description>JDBC connect string for a JDBC metastore</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <name>hive_database</name>
+        <type>hive-env</type>
+      </property>
+      <property>
+        <name>ambari.hive.db.schema.name</name>
+        <type>hive-site</type>
+      </property>
+    </depends-on>
+  </property>
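+  <!-- Illustrative only (hypothetical host/database): for a PostgreSQL-backed metastore the
+       URL would instead look like
+         <value>jdbc:postgresql://db.example.com:5432/hive</value>
+       with javax.jdo.option.ConnectionDriverName set to org.postgresql.Driver. -->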
+
+  <property>
+    <name>hive.metastore.server.max.threads</name>
+    <value>100000</value>
+    <description>Maximum number of worker threads in the Thrift server's pool.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value>false</value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+      Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value>hive/_HOST@EXAMPLE.COM</value>
+    <description>
+      The service principal for the metastore Thrift server.
+      The special string _HOST will be replaced automatically with the correct host name.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
+    <value>/hive/cluster/delegation</value>
+    <description>The root path for token store data.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
+  </property>
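+  <!-- Illustrative only (hypothetical hostnames): multiple metastores can be listed
+       comma-separated for failover, e.g.
+         <value>thrift://ms1.example.com:9083,thrift://ms2.example.com:9083</value> -->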
+
+  <property>
+    <name>hive.metastore.pre.event.listeners</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
+    <description>List of comma separated listeners for metastore events.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.authorization.storage.checks</name>
+    <value>false</value>
+    <description>
+      Should the metastore do authorization checks against the underlying storage (usually hdfs)
+      for operations like drop-partition (disallow the drop-partition if the user in
+      question doesn't have permissions to delete the corresponding directory
+      on the storage).
+    </description>
+  </property>
+
+  <property>
+    <name>datanucleus.autoCreateSchema</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>datanucleus.fixedDatastore</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>1800s</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      MetaStore Client socket timeout in seconds
+    </description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the Hive client authorization</description>
+    <display-name>Enable Authorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
+    <description>
+      The Hive client authorization manager class name. The user defined authorization class should implement
+      interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.class</name>
+    <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
+    <description>The delegation token store implementation.
+      Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
+  </property>
+
+  <property>
+    <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
+    <value>localhost:2181</value>
+    <description>The ZooKeeper token store connect string.</description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.auth.reads</name>
+    <value>true</value>
+    <description>If this is true, metastore authorizer authorizes read actions on database, table</description>
+  </property>
+
+  <property>
+    <name>hive.server2.logging.operation.log.location</name>
+    <value>/tmp/hive/operation_logs</value>
+    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
+  </property>
+
+  <property>
+    <name>hive.server2.logging.operation.enabled</name>
+    <value>true</value>
+    <description>When true, HS2 will save operation logs</description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
+    <description>
+      authenticator manager class name to be used in the metastore for authentication.
+      The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <display-name>Hive Authorization Manager</display-name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>
+      authorization manager class name to be used in the metastore for authorization.
+      The user defined authorization class should implement interface
+      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>
+      hive client authenticator manager class name. The user defined authenticator should implement
+      interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+    <description>
+      Setting this property to true will have HiveServer2 execute
+      Hive operations as the user making the calls to it.
+    </description>
+    <display-name>Run as end user instead of Hive user</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_security_authorization</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.user.install.directory</name>
+    <value>/user/</value>
+    <description>
+      If hive (in tez mode only) cannot find a usable hive jar in "hive.jar.directory",
+      it will upload the hive jar to "hive.user.install.directory/user.name"
+      and use it to run queries.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.conf.restricted.list</name>
+    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
+    <description>Comma separated list of configuration options which are immutable at runtime</description>
+  </property>
+
+  <property>
+    <name>hive.server2.use.SSL</name>
+    <value>false</value>
+    <description/>
+    <display-name>Use SSL</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.table.type.mapping</name>
+    <value>CLASSIC</value>
+    <description>
+      Expects one of [classic, hive].
+      This setting reflects how HiveServer2 will report the table types for JDBC and other
+      client implementations that retrieve the available tables and supported table types.
+      HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW
+      CLASSIC : More generic types like TABLE and VIEW
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.impersonation</name>
+    <value>true</value>
+    <deleted>true</deleted>
+    <description>Enable user impersonation for HiveServer2</description>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable HDFS filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+    <description>Disable local filesystem cache.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.scratchdir</name>
+    <value>/tmp/hive</value>
+    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.submit.local.task.via.child</name>
+    <value>true</value>
+    <description>
+      Determines whether local tasks (typically the mapjoin hashtable generation phase) run in a
+      separate JVM (true recommended) or not.
+      Running in the parent JVM avoids the overhead of spawning a new JVM, but can lead to out-of-memory issues.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.compress.intermediate</name>
+    <value>false</value>
+    <description>
+      This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.reducers.bytes.per.reducer</name>
+    <value>67108864</value>
+    <description>Defines the size per reducer. For example, if it is set to 64M, given 256M input size, 4 reducers will be used.</description>
+    <display-name>Data per Reducer</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>64</minimum>
+      <maximum>4294967296</maximum>
+      <unit>B</unit>
+      <step-increment></step-increment>
+    </value-attributes>
+  </property>
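+  <!-- Worked example for the value above: with 67108864 bytes (64 MB) per reducer,
+       a 1 GB (1073741824 byte) input would yield 1073741824 / 67108864 = 16 reducers,
+       subject to the hive.exec.reducers.max cap below. -->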
+
+  <property>
+    <name>hive.exec.reducers.max</name>
+    <value>1009</value>
+    <description>
+      The maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is
+      negative, Hive will use this as the maximum number of reducers when automatically determining the number of reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.compress.output</name>
+    <value>false</value>
+    <description>
+      This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed.
+      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.submitviachild</name>
+    <value>false</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+    <display-name>Enforce bucketing</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sortmergebucketmapjoin</name>
+    <value>true</value>
+    <description>If the user asked for a sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not?</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries</description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.optimized.hashtable</name>
+    <value>true</value>
+    <description>
+      Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,
+      because memory-optimized hashtable cannot be serialized.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.smbjoin.cache.rows</name>
+    <value>10000</value>
+    <description>How many rows with the same key value should be cached in memory per SMB-joined table.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.percentmemory</name>
+    <value>0.5</value>
+    <description>Portion of total memory to be used by map-side group aggregation hash table</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
+    <value>0.9</value>
+    <description>
+      The max memory to be used by map-side group aggregation hash table.
+      If the memory usage is higher than this number, force to flush data
+    </description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr.hash.min.reduction</name>
+    <value>0.5</value>
+    <description>
+      Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number.
+      Set to 1 to make sure hash aggregation is never turned off.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.merge.mapfiles</name>
+    <value>true</value>
+    <description>Merge small files at the end of a map-only job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.mapredfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a map-reduce job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.tezfiles</name>
+    <value>false</value>
+    <description>Merge small files at the end of a Tez DAG</description>
+  </property>
+
+  <property>
+    <name>hive.merge.size.per.task</name>
+    <value>256000000</value>
+    <description>Size of merged files at the end of the job</description>
+  </property>
+
+  <property>
+    <name>hive.merge.smallfiles.avgsize</name>
+    <value>16000000</value>
+    <description>
+      When the average output file size of a job is less than this number, Hive will start an additional
+      map-reduce job to merge the output files into bigger files. This is only done for map-only jobs
+      if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.merge.rcfile.block.level</name>
+    <value>true</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.merge.orcfile.stripe.level</name>
+    <value>true</value>
+    <description>
+      When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a
+      table with ORC file format, enabling this config will do stripe level fast merge
+      for small ORC files. Note that enabling this config will not honor padding tolerance
+      config (hive.exec.orc.block.padding.tolerance).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.default.stripe.size</name>
+    <value>67108864</value>
+    <description>Define the default ORC stripe size</description>
+    <display-name>Default ORC Stripe Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>8388608</minimum>
+      <maximum>268435456</maximum>
+      <unit>B</unit>
+      <increment-step>8388608</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+    <description>Whether to try bucket mapjoin</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>false</value>
+    <description> If the tables being joined are sorted and bucketized on the join columns, and they have the same number
+      of buckets, a sort-merge join can be performed by setting this parameter as true.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on. </description>
+  </property>
+
+  <property>
+    <name>hive.exec.dynamic.partition</name>
+    <value>true</value>
+    <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.dynamic.partition.mode</name>
+    <value>nonstrict</value>
+    <description>
+      In strict mode, the user must specify at least one static partition
+      in case the user accidentally overwrites all partitions.
+      NonStrict allows all partitions of a table to be dynamic.
+    </description>
+    <display-name>Allow all partitions to be Dynamic</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>nonstrict</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>strict</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
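+  <!-- Illustrative only (hypothetical table): in strict mode at least one partition
+       column must be static, e.g.
+         INSERT OVERWRITE TABLE logs PARTITION (dt='2016-03-09', country) SELECT ... ;
+       in nonstrict mode both dt and country could be taken dynamically from the SELECT. -->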
+
+  <property>
+    <name>hive.exec.max.dynamic.partitions.pernode</name>
+    <value>2000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.max.created.files</name>
+    <value>100000</value>
+    <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.max.dynamic.partitions</name>
+    <value>5000</value>
+    <description>Maximum number of dynamic partitions allowed to be created in total.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file size</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.constant.propagation</name>
+    <value>true</value>
+    <description>Whether to enable constant propagation optimizer</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.metadataonly</name>
+    <value>true</value>
+    <description/>
+  </property>
+
+  <property>
+    <name>hive.optimize.null.scan</name>
+    <value>true</value>
+    <description>Don't scan relations that are guaranteed not to generate any rows</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
+    <value>false</value>
+    <description>
+      If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join,
+      this parameter decides whether each table should be tried as a big table, and effectively a map-join should be
+      tried. That would create a conditional task with n+1 children for an n-way join (1 child for each table as the
+      big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a
+      sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted
+      and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table
+      with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
+      if the complete small table can fit in memory, and a map-join can be performed.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+    <deleted>true</deleted>
+    <description>Required to Enable the conversion of an SMB (Sort-Merge-Bucket) to a map-join SMB.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>
+      Whether Hive enables the optimization about converting common join into mapjoin based on the input file size.
+      If this parameter is on, and the sum of size for n-1 of the tables/partitions for an n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>52428800</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of size for n-1 of the tables/partitions for an n-way join is smaller than this size, the join is directly
+      converted to a mapjoin (there is no conditional task).
+    </description>
+    <display-name>For Map Join, per Map memory threshold</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>8192</minimum>
+      <maximum>17179869184</maximum>
+      <unit>B</unit>
+      <step-increment></step-increment>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.tez.container.size</name>
+      </property>
+    </depends-on>
+  </property>
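+  <!-- Worked example for the value above: in a 3-way join of tables sized 10 MB, 30 MB and
+       10 GB, the sum of the n-1 smallest inputs (10 MB + 30 MB = 40 MB) is under the
+       52428800-byte (50 MB) threshold, so the join is converted directly to a mapjoin. -->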
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>4</value>
+    <description>
+      Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to the parent RS.
+      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job.
+      The optimization will be automatically disabled if the number of reducers would be less than the specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.sort.dynamic.partition</name>
+    <value>false</value>
+    <description>
+      When enabled, the dynamic partitioning column will be globally sorted.
+      This way we can keep only one record writer open for each partition value
+      in the reducer thereby reducing the memory pressure on reducers.
+    </description>
+    <display-name>Sort Partitions Dynamically</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <deleted>true</deleted>
+    <description>If hive.auto.convert.join is off, this parameter does not take
+      effect. If it is on, and if there are map-join jobs followed by a map-reduce
+      job (e.g. a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      How many values for each key in the map-joined table should be cached in memory.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>true</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of query execution.
+      The default value is false.
+    </description>
+    <display-name>Enable Vectorization and Map Vectorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+    <description>
+      Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
+      This should always be set to true. Since it is a new feature, it has been made configurable.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>Whether to enable automatic use of indexes</description>
+    <display-name>Push Filters to Storage</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.execution.engine</name>
+    <value>tez</value>
+    <description>
+      Expects one of [mr, tez].
+      Chooses execution engine. Options are: mr (Map reduce, default) or tez (hadoop 2 only)
+    </description>
+    <display-name>Execution Engine</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mr</value>
+          <label>MapReduce</label>
+        </entry>
+        <entry>
+          <value>tez</value>
+          <label>TEZ</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.exec.post.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of post-execution hooks to be invoked for each statement.
+      A post-execution hook is specified as the name of a Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_timeline_logging_enabled</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.http.port</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.https.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.pre.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of pre-execution hooks to be invoked for each statement.
+      A pre-execution hook is specified as the name of a Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_timeline_logging_enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.failure.hooks</name>
+    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
+    <description>
+      Comma-separated list of on-failure hooks to be invoked for each statement.
+      An on-failure hook is specified as the name of Java class which implements the
+      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
+    </description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_timeline_logging_enabled</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.parallel</name>
+    <value>false</value>
+    <description>Whether to execute jobs in parallel</description>
+  </property>
+
+  <property>
+    <name>hive.exec.parallel.thread.number</name>
+    <value>8</value>
+    <description>How many jobs at most can be executed in parallel</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.maxentries</name>
+    <value>100000</value>
+    <description>
+      Max number of entries in the vector group by aggregation hashtables.
+      Exceeding this will trigger a flush regardless of memory pressure conditions.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.tez.smb.number.waves</name>
+    <value>0.5</value>
+    <description>The number of waves in which to run the SMB join. Accounts for the cluster being occupied. Ideally this should be 1 wave.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.dynamic.partition.pruning.max.data.size</name>
+    <value>104857600</value>
+    <description>Maximum total data size of events in dynamic pruning.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.dynamic.partition.pruning.max.event.size</name>
+    <value>1048576</value>
+    <description>Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.dynamic.partition.pruning</name>
+    <value>true</value>
+    <description>When dynamic pruning is enabled, joins on partition keys will be processed by sending events from the processing vertices to the tez application master. These events will be used to prune unnecessary partitions.</description>
+    <display-name>Allow dynamic partition pruning</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.tez.min.partition.factor</name>
+    <value>0.25</value>
+    <description>
+      When auto reducer parallelism is enabled this factor will be used to put a lower limit to the number
+      of reducers that tez specifies.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.tez.max.partition.factor</name>
+    <value>2.0</value>
+    <description>When auto reducer parallelism is enabled this factor will be used to over-partition data in shuffle edges.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.auto.reducer.parallelism</name>
+    <value>false</value>
+    <description>
+      Turn on Tez's auto reducer parallelism feature. When enabled, Hive will still estimate data sizes
+      and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as
+      necessary.
+    </description>
+    <display-name>Allow dynamic numbers of reducers</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.convert.join.bucket.mapjoin.tez</name>
+    <value>false</value>
+    <description>
+      Whether joins can be automatically converted to bucket map joins in hive
+      when tez is used as the execution engine.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.prewarm.numcontainers</name>
+    <value>3</value>
+    <description>Controls the number of containers to prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Number of Containers Held</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>20</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.prewarm.enabled</name>
+    <value>false</value>
+    <description>Enables container prewarm for Tez (Hadoop 2 only)</description>
+    <display-name>Hold Containers to Reduce Latency</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.checkinterval</name>
+    <value>4096</value>
+    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.groupby.flush.percent</name>
+    <value>0.1</value>
+    <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.autogather</name>
+    <value>true</value>
+    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.dbclass</name>
+    <value>fs</value>
+    <description>
+      Expects one of the patterns in [jdbc(:.*), hbase, counter, custom, fs].
+      The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.stats.fetch.partition.stats</name>
+    <value>true</value>
+    <description>
+      Annotation of operator tree with statistics information requires partition level basic
+      statistics like number of rows, data size and file size. Partition statistics are fetched from
+      metastore. Fetching partition statistics for each needed partition can be expensive when the
+      number of partitions is high. This flag can be used to disable fetching of partition statistics
+      from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes
+      and will estimate the number of rows from row schema.
+    </description>
+    <display-name>Fetch partition stats at compiler</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.cbo.enable</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.client.port</name>
+    <value>2181</value>
+    <description>The port of ZooKeeper servers to talk to. If the list of ZooKeeper servers specified in hive.zookeeper.quorum does not contain port numbers, this value is used.</description>
+  </property>
+
+  <property>
+    <name>hive.zookeeper.namespace</name>
+    <value>hive_zookeeper_namespace</value>
+    <description>The parent node under which all ZooKeeper nodes are created.</description>
+  </property>
+
+  <property>
+    <name>hive.stats.fetch.column.stats</name>
+    <value>false</value>
+    <description>
+      Annotation of operator tree with statistics information requires column statistics.
+      Column statistics are fetched from metastore. Fetching column statistics for each needed column
+      can be expensive when the number of columns is high. This flag can be used to disable fetching
+      of column statistics from metastore.
+    </description>
+    <display-name>Fetch column stats at compiler</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>On</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>Off</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+       <property>
+        <type>hive-site</type>
+        <name>hive.cbo.enable</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.tez.container.size</name>
+    <value>682</value>
+    <description>By default, Tez uses the java options from map tasks. Use this property to override that value.</description>
+    <display-name>Tez Container Size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>682</minimum>
+      <maximum>6820</maximum>
+      <unit>MB</unit>
+      <increment-step>682</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+      </property>
+      <property>
+        <type>yarn-site</type>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.tez.input.format</name>
+    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
+    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.java.opts</name>
+    <value>-server -Xmx545m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps</value>
+    <description>Java command line options for Tez. The -Xmx parameter value is generally 80% of hive.tez.container.size.</description>
+  </property>
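+  <!-- Worked example for the defaults above: hive.tez.container.size is 682 MB, and
+       0.8 * 682 = 545.6, which is where the -Xmx545m in hive.tez.java.opts comes from. -->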
+
+  <property>
+    <name>hive.compute.query.using.stats</name>
+    <value>true</value>
+    <description>
+      When set to true Hive will answer a few queries like count(1) purely using stats
+      stored in the metastore. For basic stats collection, turn on the config hive.stats.autogather to true.
+      For more advanced stats collection, you need to run "analyze table" queries.
+    </description>
+    <display-name>Compute simple queries using stats only</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.cbo.enable</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.default.compress</name>
+    <value>ZLIB</value>
+    <description>Define the default compression codec for ORC files</description>
+    <display-name>ORC Compression Algorithm</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ZLIB</value>
+          <label>zlib Compression Library</label>
+        </entry>
+        <entry>
+          <value>SNAPPY</value>
+          <label>Snappy Compression Library</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.orc.splits.include.file.footer</name>
+    <value>false</value>
+    <description>
+      If turned on, splits generated by ORC will include metadata about the stripes in the file. This
+      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.orc.compute.splits.num.threads</name>
+    <value>10</value>
+    <description>How many threads ORC should use to create splits in parallel.</description>
+  </property>
+
+  <property>
+    <name>hive.limit.optimize.enable</name>
+    <value>true</value>
+    <description>Whether to enable the optimization of trying a smaller subset of data for simple LIMIT first.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.cpu.vcores</name>
+    <value>-1</value>
+    <description>By default Tez will ask for however many CPUs map-reduce is configured to use per container. This can be used to override that value.</description>
+  </property>
+
+  <property>
+    <name>hive.tez.log.level</name>
+    <value>INFO</value>
+    <description>
+      The log level to use for tasks executing as part of the DAG.
+      Used only if hive.tez.java.opts is used to configure Java options.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.limit.pushdown.memory.usage</name>
+    <value>0.04</value>
+    <description>The max memory to be used for hash in RS operator for top K selection.</description>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.encoding.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Define the encoding strategy to use while writing data. Changing this
+      will only affect the lightweight encoding for integers. This flag will not change
+      the compression level of higher level compression codec (like ZLIB). Possible
+      options are SPEED and COMPRESSION.
+    </description>
+    <display-name>ORC Encoding Strategy</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_exec_orc_storage_strategy</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.exec.orc.compression.strategy</name>
+    <value>SPEED</value>
+    <description>
+      Define the compression strategy to use while writing data. This changes the
+      compression level of higher level compression codec (like ZLIB).
+    </description>
+    <display-name>ORC Compression Strategy</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>SPEED</value>
+          <label>Speed</label>
+        </entry>
+        <entry>
+          <value>COMPRESSION</value>
+          <label>Compression</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_exec_orc_storage_strategy</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.reduce.enabled</name>
+    <value>false</value>
+    <description>
+      This flag should be set to true to enable vectorized mode of the reduce-side of
+      query execution.
+    </description>
+    <display-name>Enable Reduce Vectorization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.ldap.baseDN</name>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.kerberos.principal</name>
+    <value>hive/_HOST@EXAMPLE.COM</value>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.custom.authentication.class</name>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.ldap.url</name>
+    <value> </value>
+    <property-type>DONT_ADD_ON_UPGRADE</property-type>
+    <depends-on>
+      <property>
+        <type>hive-site</type>
+        <name>hive.server2.authentication</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.server2.tez.default.queues</name>
+    <display-name>Default query queues</display-name>
+    <value>default</value>
+    <description>
+      A list of comma separated values corresponding to YARN queues of the same name.
+      When HiveServer2 is launched in Tez mode, this configuration needs to be set
+      for multiple Tez sessions to run in parallel on the cluster.
+    </description>
+    <value-attributes>
+      <type>combo</type>
+      <entries>
+        <entry>
+          <value>default</value>
+          <label>Default</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1+</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>capacity-scheduler</type>
+        <name>yarn.scheduler.capacity.root.queues</name>
+      </property>
+    </depends-on>
+  </property>
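+  <!-- Illustrative only (hypothetical queue names): to spread sessions over two YARN queues,
+       this could be set to
+         <value>default,interactive</value>
+       where both queues must already exist in the capacity scheduler. -->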
+
+  <property>
+    <name>hive.server2.tez.sessions.per.default.queue</name>
+    <value>1</value>
+    <description>
+      A positive integer that determines the number of Tez sessions that should be
+      launched on each of the queues specified by "hive.server2.tez.default.queues".
+      Determines the parallelism on each queue.
+    </description>
+    <display-name>Session per queue</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>1</minimum>
+      <maximum>10</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+  </property>
+
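+  <!-- Illustrative worked example (not shipped configuration): with
+       hive.server2.tez.default.queues=default,llap and
+       hive.server2.tez.sessions.per.default.queue=2, HiveServer2 manages
+       2 x 2 = 4 Tez sessions, so at most 4 queries run concurrently. -->
+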
+  <property>
+    <name>hive.server2.tez.initialize.default.sessions</name>
+    <value>false</value>
+    <description>
+      This flag controls whether HiveServer2 starts its pool of Tez sessions at
+      initialization. Leaving it off still allows users to run queries over Tez,
+      just without the pre-created pool of sessions.
+    </description>
+    <display-name>Start Tez session at Initialization</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.txn.manager</name>
+    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+    <description>Transaction manager to use. Select DbTxnManager (together with hive.support.concurrency=true) to enable ACID transactions.</description>
+    <display-name>Transaction Manager</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
+          <label>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager (off)</label>
+        </entry>
+        <entry>
+          <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
+          <label>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager (on)</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.txn.timeout</name>
+    <value>300</value>
+    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
+  </property>
+
+  <property>
+    <name>hive.txn.max.open.batch</name>
+    <value>1000</value>
+    <description>
+      Maximum number of transactions that can be fetched in one call to open_txns().
+      Increasing this will decrease the number of delta files created when
+      streaming data into Hive.  But it will also increase the number of
+      open transactions at any given time, possibly impacting read performance.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.cli.print.header</name>
+    <value>false</value>
+    <description>
+      Whether to print the names of the columns in query output.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.support.concurrency</name>
+    <value>false</value>
+    <description>
+      Support concurrency and use locks; needed for transactions. Requires ZooKeeper.
+    </description>
+    <display-name>Use Locking</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.compactor.initiator.on</name>
+    <value>false</value>
+    <description>Whether to run the compactor's initiator thread in this metastore instance. If there is more than one Thrift metastore instance, this should be set to true on exactly one of them. Setting it to true on only one host can be achieved by creating a config group containing that metastore host and overriding the default value to true in it.</description>
+    <display-name>Run Compactor</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>True</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>False</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.compactor.worker.threads</name>
+    <value>0</value>
+    <description>Number of compactor worker threads to run on this metastore instance. Can be set to different values on different metastore instances.</description>
+    <display-name>Number of threads used by Compactor</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>20</maximum>
+      <increment-step>1</increment-step>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_txn_acid</name>
+      </property>
+    </depends-on>
+  </property>
+
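+  <!-- Illustrative note (not shipped configuration): the hive_txn_acid-dependent
+       settings act as a group. A working ACID setup typically pairs
+       hive.support.concurrency=true with
+       hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager,
+       hive.compactor.initiator.on=true on exactly one metastore instance, and
+       hive.compactor.worker.threads set to 1 or more. -->
+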
+  <property>
+    <name>hive.compactor.worker.timeout</name>
+    <value>86400L</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Time before a given compaction in working state is declared a failure
+      and returned to the initiated state.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.compactor.check.interval</name>
+    <value>300L</value>
+    <description>
+      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
+      Time between checks to see if any partitions need to be compacted.
+      This should be kept high because each check for compaction requires many calls against the NameNode.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.fetch.task.conversion</name>
+    <value>more</value>
+    <description>
+      Expects one of [none, minimal, more].
+      Some select queries can be converted to single FETCH task minimizing latency.
+      Currently the query must be single-sourced, with no subqueries, and must not contain
+      aggregations or distincts (which incur a reduce stage), lateral views, or joins.
+      0. none : disable hive.fetch.task.conversion
+      1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only
+      2. more    : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)
+    </description>
+  </property>
+
+  <property>
+    <name>hive.fetch.task.aggr</name>
+    <value>false</value>
+    <description>
+      Aggregation queries with no group-by clause (for example, select count(*) from src) execute
+      final aggregations in a single reduce task. If this is set to true, Hive delegates the final
+      aggregation stage to the fetch task, possibly decreasing the query time.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.fetch.task.conversion.threshold</name>
+    <value>1073741824</value>
+    <description>
+      Input threshold for applying hive.fetch.task.conversion. If the target table is native, the
+      input length is calculated as the sum of the file lengths. If it is not native, the storage
+      handler for the table can optionally implement the org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.num.threshold</name>
+    <value>10</value>
+    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.delta.pct.threshold</name>
+    <value>0.1f</value>
+    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>hive.compactor.abortedtxn.threshold</name>
+    <value>1000</value>
+    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
+  </property>
+
+  <property>
+    <name>datanucleus.cache.level2.type</name>
+    <value>none</value>
+    <description>Determines the caching mechanism the DataNucleus L2 cache will use. It is strongly recommended to keep the default value of 'none', as other values may cause consistency errors in Hive.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.port</name>
+    <value>10500</value>
+    <display-name>HiveServer2 Port</display-name>
+    <description>
+      TCP port number for HiveServer2 Interactive to listen on; set to 10500 here rather than the standard HiveServer2 default of 10000.
+    </description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>int</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.allow.user.substitution</name>
+    <value>true</value>
+    <description>Allow an alternate user to be specified as part of the HiveServer2 open connection request.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.max.worker.threads</name>
+    <value>500</value>
+    <description>Maximum number of Thrift worker threads</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.sasl.qop</name>
+    <value>auth</value>
+    <description>
+      Expects one of [auth, auth-int, auth-conf].
+      SASL QOP value; set it to one of the following values to enable higher levels of
+      protection for HiveServer2 communication with clients.
+      "auth" - authentication only (default)
+      "auth-int" - authentication plus integrity protection
+      "auth-conf" - authentication plus integrity and confidentiality protection
+      This is applicable only if HiveServer2 is configured to use Kerberos authentication.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.spnego.principal</name>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
+    <description>
+      SPNEGO service principal, optional; a typical value looks like HTTP/_HOST@EXAMPLE.COM.
+      The SPNEGO service principal is used by HiveServer2 when Kerberos security is enabled
+      and the HTTP transport mode is used.
+      This needs to be set only if SPNEGO is to be used in authentication.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication.spnego.keytab</name>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
+    <description>
+      Keytab file for the SPNEGO principal, optional; a typical value looks like
+      /etc/security/keytabs/spnego.service.keytab.
+      This keytab is used by HiveServer2 when Kerberos security is enabled and the
+      HTTP transport mode is used.
+      This needs to be set only if SPNEGO is to be used in authentication.
+      SPNEGO authentication is honored only if valid
+      hive.server2.authentication.spnego.principal
+      and
+      hive.server2.authentication.spnego.keytab
+      values are specified.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.server2.authentication</name>
+    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
+    <value>NONE</value>
+    <display-name>HiveServer2 Authentication</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>NONE</value>
+          <label>None</label>
+        </entry>
+        <entry>
+          <value>LDAP</value>
+          <label>LDAP</label>
+        </entry>
+        <entry>
+          <value>KERBEROS</value>
+          <label>Kerberos</label>
+        </entry>
+        <entry>
+          <value>PAM</value>
+          <label>PAM</label>
+        </entry>
+        <entry>
+          <value>CUSTOM</value>
+          <label>Custom</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.support.dynamic.service.discovery</name>
+    <value>true</value>
+    <description>Whether HiveServer2 supports dynamic service discovery for its clients.
+      To support this, each instance of HiveServer2 registers itself with ZooKeeper when
+      it is brought up. JDBC/ODBC clients should include the ZooKeeper ensemble
+      (hive.zookeeper.quorum) in their connection string.
+    </description>
+    </description>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>hive.server2.zookeeper.namespace</name>
+    <value>hiveserver2</value>
+    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.thrift.http.port</name>
+    <value>10501</value>
+    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
+  </property>
+
+  <property>
+    <name>hive.server2.transport.mode</name>
+    <value>binary</value>
+    <description>
+      Expects one of [binary, http].
+      Transport mode of HiveServer2.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.default.fileformat</name>
+    <value>TextFile</value>
+    <description>Default file format for CREATE TABLE statement.</description>
+    <display-name>Default File Format</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>ORC</value>
+          <description>The Optimized Row Columnar (ORC) file format provides a highly efficient way to store Hive data. It was designed to overcome limitations of the other Hive file formats. Using ORC files improves performance when Hive is reading, writing, and processing data.</description>
+        </entry>
+        <entry>
+          <value>TextFile</value>
+          <description>Text file format saves Hive data as normal text.</description>
+        </entry>
+      </entries>
+    </value-attributes>
+  </property>
+
+  <property>
+    <name>atlas.cluster.name</name>
+    <value>primary</value>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.enableTLS</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>atlas.rest.address</name>
+    <value>http://localhost:21000</value>
+    <depends-on>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.enableTLS</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.http.port</name>
+      </property>
+      <property>
+        <type>application-properties</type>
+        <name>atlas.server.https.port</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>hive.default.fileformat.managed</name>
+    <value>TextFile</value>
+    <description>
+      Default file format for CREATE TABLE statement applied to managed tables only.
+      External tables will be created with the default file format. Leaving this null
+      will result in using the default file format for all tables.
+    </description>
+  </property>
+
+  <property>
+    <name>datanucleus.rdbms.datastoreAdapterClassName</name>
+    <description>DataNucleus adapter class. This property is used only when the Hive database is SQL Anywhere.</description>
+    <depends-on>
+      <property>
+        <type>hive-env</type>
+        <name>hive_database</name>
+      </property>
+    </depends-on>
+  </property>
+
+  <property>
+    <name>atlas.hook.hive.maxThreads</name>
+    <value>1</value>
+    <description>
+      Maximum number of threads used by the Atlas hook.
+    </description>
+  </property>
+
+  <property>
+    <name>atlas.hook.hive.minThreads</name>
+    <value>1</value>
+    <description>
+      Minimum number of threads maintained by the Atlas hook.
+    </description>
+  </property>
+
+</configuration>
\ No newline at end of file

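A note on how clients consume the discovery settings above: with
hive.server2.support.dynamic.service.discovery=true and the hiveserver2 ZooKeeper
namespace, JDBC/ODBC clients resolve the interactive HiveServer2 instance through the
quorum rather than a fixed host. A minimal beeline invocation against these defaults
might look like this (the ZooKeeper hostnames are placeholders, not values shipped by
the stack):

  beeline -u "jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2"

With discovery disabled, clients connect directly to the binary Thrift port instead,
e.g. jdbc:hive2://hs2-host:10500/; under Kerberos, a saslQop parameter matching
hive.server2.thrift.sasl.qop (for example ;saslQop=auth-conf) can be appended.
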
http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/llap-daemon-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/llap-daemon-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/llap-daemon-log4j.xml
new file mode 100644
index 0000000..1c60285
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/llap-daemon-log4j.xml
@@ -0,0 +1,126 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <description>Custom log4j.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+status = WARN
+name = LlapDaemonLog4j2
+packages = org.apache.hadoop.hive.ql.log
+
+# list of properties
+property.llap.daemon.log.level = WARN
+property.llap.daemon.root.logger = console
+property.llap.daemon.log.dir = .
+property.llap.daemon.log.file = llapdaemon.log
+property.llap.daemon.historylog.file = llapdaemon_history.log
+property.llap.daemon.log.maxfilesize = 256MB
+property.llap.daemon.log.maxbackupindex = 20
+
+# list of all appenders
+appenders = console, RFA, HISTORYAPPENDER
+
+# console appender
+appender.console.type = Console
+appender.console.name = console
+appender.console.target = SYSTEM_ERR
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t%x] %p %c{2} : %m%n
+
+# rolling file appender
+appender.RFA.type = RollingFile
+appender.RFA.name = RFA
+appender.RFA.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}
+appender.RFA.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}_%i
+appender.RFA.layout.type = PatternLayout
+appender.RFA.layout.pattern = %d{ISO8601} %-5p [%t%x]: %c{2} (%F:%M(%L)) - %m%n
+appender.RFA.policies.type = Policies
+appender.RFA.policies.size.type = SizeBasedTriggeringPolicy
+appender.RFA.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
+appender.RFA.strategy.type = DefaultRolloverStrategy
+appender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
+
+# history file appender
+appender.HISTORYAPPENDER.type = RollingFile
+appender.HISTORYAPPENDER.name = HISTORYAPPENDER
+appender.HISTORYAPPENDER.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}
+appender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%i
+appender.HISTORYAPPENDER.layout.type = PatternLayout
+appender.HISTORYAPPENDER.layout.pattern = %m%n
+appender.HISTORYAPPENDER.policies.type = Policies
+appender.HISTORYAPPENDER.policies.size.type = SizeBasedTriggeringPolicy
+appender.HISTORYAPPENDER.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
+appender.HISTORYAPPENDER.strategy.type = DefaultRolloverStrategy
+appender.HISTORYAPPENDER.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
+
+# list of all loggers
+loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger
+
+logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
+logger.NIOServerCnxn.level = WARN
+
+logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
+logger.ClientCnxnSocketNIO.level = WARN
+
+logger.DataNucleus.name = DataNucleus
+logger.DataNucleus.level = ERROR
+
+logger.Datastore.name = Datastore
+logger.Datastore.level = ERROR
+
+logger.JPOX.name = JPOX
+logger.JPOX.level = ERROR
+
+logger.HistoryLogger.name = org.apache.hadoop.hive.llap.daemon.HistoryLogger
+logger.HistoryLogger.level = WARN
+logger.HistoryLogger.additivity = false
+logger.HistoryLogger.appenderRefs = HistoryAppender
+logger.HistoryLogger.appenderRef.HistoryAppender.ref = HISTORYAPPENDER
+
+# root logger
+rootLogger.level = ${sys:llap.daemon.log.level}
+rootLogger.appenderRefs = root
+rootLogger.appenderRef.root.ref = ${sys:llap.daemon.root.logger}
+    </value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+</configuration>

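The ${sys:...} lookups in the template above resolve against Java system properties at
daemon startup, so the effective log directory, file name, and level come from the LLAP
launch command rather than from this file. A sketch of how they might be supplied on the
daemon's JVM command line (the exact wiring is handled by the stack scripts and is not
part of this diff):

  -Dllap.daemon.log.dir=/var/log/hive -Dllap.daemon.log.file=llapdaemon.log \
  -Dllap.daemon.log.level=INFO -Dllap.daemon.root.logger=RFA
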
http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/llap-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/llap-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/llap-env.xml
new file mode 100644
index 0000000..24a95cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/llap-env.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+  <property>
+    <name>enable_hive_interactive</name>
+    <value>false</value>
+    <description>Enable or disable interactive query in this cluster.</description>
+    <display-name>Enable Interactive Query</display-name>
+    <value-attributes>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>true</value>
+          <label>Yes</label>
+        </entry>
+        <entry>
+          <value>false</value>
+          <label>No</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>llap_queue_name</name>
+    <value>default</value>
+    <description>LLAP Queue Name.</description>
+    <display-name>LLAP Queue Name</display-name>
+    <value-attributes>
+      <type>combo</type>
+      <entries>
+        <entry>
+          <value>default</value>
+          <label>Default</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+  <property>
+    <name>llap_am_queue_name</name>
+    <value>default</value>
+    <description>LLAP AM Queue Name.</description>
+    <display-name>LLAP AM Queue Name</display-name>
+    <value-attributes>
+      <type>combo</type>
+      <entries>
+        <entry>
+          <value>default</value>
+          <label>Default</label>
+        </entry>
+      </entries>
+      <selection-cardinality>1</selection-cardinality>
+    </value-attributes>
+  </property>
+</configuration>
\ No newline at end of file

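These llap-env properties reach the service's Python command scripts through the command
JSON. A minimal sketch of how a params module could read them, assuming the standard
resource_management API (the variable names are illustrative):

  from resource_management.libraries.script.script import Script

  # Script.get_config() returns the command JSON sent by the Ambari server;
  # per-type configurations live under the 'configurations' key.
  config = Script.get_config()
  enable_hive_interactive = config['configurations']['llap-env']['enable_hive_interactive']
  llap_queue_name = config['configurations']['llap-env']['llap_queue_name']
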
http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
index 54c92d8..459845b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/metainfo.xml
@@ -21,6 +21,97 @@
     <service>
       <name>HIVE</name>
       <version>1.2.1.2.6</version>
+      <components>
+        <component>
+          <name>HIVE_SERVER_INTERACTIVE</name>
+          <displayName>HiveServer2 Interactive</displayName>
+          <category>MASTER</category>
+          <cardinality>0-1</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <clientsToUpdateConfigs></clientsToUpdateConfigs>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>PIG/PIG</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SLIDER/SLIDER</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hive_server_interactive.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configuration-dependencies>
+            <config-type>hive-site</config-type>
+            <config-type>hive-interactive-site</config-type>
+          </configuration-dependencies>
+        </component>
+      </components>
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+        <service>HDFS</service>
+        <service>YARN</service>
+        <service>TEZ</service>
+        <service>PIG</service>
+        <service>SLIDER</service>
+      </requiredServices>
     </service>
   </services>
-</metainfo>
+</metainfo>
\ No newline at end of file

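The commandScript entry above points at scripts/hive_server_interactive.py, which is not
part of this diff. Ambari PYTHON command scripts follow a common shape: a Script subclass
whose method names match the lifecycle commands sent by the server. A minimal sketch under
that assumption (method bodies are placeholders, not the real implementation):

  from resource_management.libraries.script.script import Script

  class HiveServerInteractive(Script):
    def install(self, env):
      self.install_packages(env)  # install the stack packages for this component

    def configure(self, env):
      pass  # would render hive-interactive-site.xml and related configs

    def start(self, env):
      self.configure(env)  # configure first, then launch HiveServer2 Interactive

    def stop(self, env):
      pass  # would stop the server process

    def status(self, env):
      pass  # would raise ComponentIsNotRunning when the process is down

  if __name__ == "__main__":
    HiveServerInteractive().execute()
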
http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/themes/theme.json
new file mode 100644
index 0000000..26b9532
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/themes/theme.json
@@ -0,0 +1,76 @@
+{
+  "configuration" : {
+    "placement" : {
+      "configs" : [
+        {
+          "config": "llap-env/enable_hive_interactive",
+          "subsection-name": "interactive-query-row1-col1"
+        },
+        {
+          "config": "llap-env/llap_queue_name",
+          "subsection-name": "interactive-query-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "llap-env/enable_hive_interactive"
+              ],
+              "if": "${llap-env/enable_hive_interactive}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        },
+        {
+          "config": "llap-env/llap_am_queue_name",
+          "subsection-name": "interactive-query-row1-col1",
+          "depends-on": [
+            {
+              "configs":[
+                "llap-env/enable_hive_interactive"
+              ],
+              "if": "${llap-env/enable_hive_interactive}",
+              "then": {
+                "property_value_attributes": {
+                  "visible": true
+                }
+              },
+              "else": {
+                "property_value_attributes": {
+                  "visible": false
+                }
+              }
+            }
+          ]
+        }
+      ]
+    },
+    "widgets" : [
+      {
+        "config": "llap-env/enable_hive_interactive",
+        "widget": {
+          "type": "toggle"
+        }
+      },
+      {
+        "config": "llap-env/llap_queue_name",
+        "widget": {
+          "type": "list"
+        }
+      },
+      {
+        "config": "llap-env/llap_am_queue_name",
+        "widget": {
+          "type": "list"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file


[17/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
index e8a3135..fba7b9c 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_drpc_server.py
@@ -32,7 +32,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -45,7 +45,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -74,7 +74,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -94,7 +94,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -105,7 +105,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -134,7 +134,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -154,7 +154,7 @@ class TestStormDrpcServer(TestStormBase):
                      classname = "DrpcServer",
                      command = "pre_upgrade_restart",
                      config_file="default.json",
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
@@ -171,7 +171,7 @@ class TestStormDrpcServer(TestStormBase):
                      classname = "DrpcServer",
                      command = "pre_upgrade_restart",
                      config_dict = json_content,
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES,
                      call_mocks = [(0, None, ''), (0, None)],
                      mocks_dict = mocks_dict)
@@ -217,7 +217,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -240,7 +240,7 @@ class TestStormDrpcServer(TestStormBase):
                         classname = "DrpcServer",
                         command = "security_status",
                         config_file="secured.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -257,7 +257,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -276,7 +276,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -286,7 +286,7 @@ class TestStormDrpcServer(TestStormBase):
                        classname = "DrpcServer",
                        command = "security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_jaas_configuration.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_jaas_configuration.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_jaas_configuration.py
index 74191c1..ddb111b 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_jaas_configuration.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_jaas_configuration.py
@@ -30,7 +30,7 @@ class TestStormJaasConfiguration(TestStormBase):
                        classname = "Nimbus",
                        command = "configure",
                        config_file = "default-storm-start.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -40,7 +40,7 @@ class TestStormJaasConfiguration(TestStormBase):
                        classname = "Nimbus",
                        command = "start",
                        config_file = "default-storm-start.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -53,7 +53,7 @@ class TestStormJaasConfiguration(TestStormBase):
                        classname = "Nimbus",
                        command = "configure",
                        config_file = "secured-storm-start.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -64,7 +64,7 @@ class TestStormJaasConfiguration(TestStormBase):
                        classname = "Nimbus",
                        command = "start",
                        config_file = "secured-storm-start.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
index 8a49505..0f95a0c 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus.py
@@ -32,7 +32,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -43,7 +43,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -74,7 +74,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "start",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
                        )
     self.assert_configure_default()
@@ -117,7 +117,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -137,7 +137,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -148,7 +148,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -177,7 +177,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -197,7 +197,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "pre_upgrade_restart",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
@@ -215,7 +215,7 @@ class TestStormNimbus(TestStormBase):
                      classname = "Nimbus",
                      command = "pre_upgrade_restart",
                      config_dict = json_content,
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES,
                      call_mocks = [(0, None, ''), (0, None)],
                      mocks_dict = mocks_dict)
@@ -262,7 +262,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -285,7 +285,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -302,7 +302,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -320,7 +320,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -330,7 +330,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
index 6ed2612..850a98b 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
@@ -31,7 +31,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -42,7 +42,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -61,7 +61,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'supervisorctl stop storm-nimbus',
@@ -74,7 +74,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -85,7 +85,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -104,7 +104,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'supervisorctl stop storm-nimbus',
@@ -117,7 +117,7 @@ class TestStormNimbus(TestStormBase):
                        classname = "Nimbus",
                        command = "pre_upgrade_restart",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
@@ -136,7 +136,7 @@ class TestStormNimbus(TestStormBase):
                      classname = "Nimbus",
                      command = "pre_upgrade_restart",
                      config_dict = json_content,
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES,
                      call_mocks = [(0, None, ''), (0, None, '')],
                      mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
index d6c2d3c..0fe57cf 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_rest_api_service.py
@@ -32,7 +32,7 @@ class TestStormRestApi(TestStormBase):
                        classname = "StormRestApi",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -43,7 +43,7 @@ class TestStormRestApi(TestStormBase):
                        classname = "StormRestApi",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -73,7 +73,7 @@ class TestStormRestApi(TestStormBase):
                        classname = "StormRestApi",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -93,7 +93,7 @@ class TestStormRestApi(TestStormBase):
                        classname = "StormRestApi",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -104,7 +104,7 @@ class TestStormRestApi(TestStormBase):
                        classname = "StormRestApi",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -133,7 +133,7 @@ class TestStormRestApi(TestStormBase):
                        classname = "StormRestApi",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
index 82f08a3..c70e06c 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor.py
@@ -32,7 +32,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -43,7 +43,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -85,7 +85,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -115,7 +115,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -126,7 +126,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -168,7 +168,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -198,7 +198,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "pre_upgrade_restart",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
@@ -216,7 +216,7 @@ class TestStormSupervisor(TestStormBase):
                      classname = "Supervisor",
                      command = "pre_upgrade_restart",
                      config_dict = json_content,
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES,
                      call_mocks = [(0, None, ''), (0, None)],
                      mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
index df77506..f4f6bae 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_supervisor_prod.py
@@ -32,7 +32,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -43,7 +43,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -75,7 +75,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'supervisorctl stop storm-supervisor',
@@ -98,7 +98,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -109,7 +109,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -141,7 +141,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -167,7 +167,7 @@ class TestStormSupervisor(TestStormBase):
                        classname = "Supervisor",
                        command = "pre_upgrade_restart",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
@@ -186,7 +186,7 @@ class TestStormSupervisor(TestStormBase):
                      classname = "Supervisor",
                      command = "pre_upgrade_restart",
                      config_dict = json_content,
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES,
                      call_mocks = [(0, None, ''), (0, None)],
                      mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
index 0d45687..a61f5ed 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_storm_ui_server.py
@@ -32,7 +32,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -43,7 +43,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -86,7 +86,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -106,7 +106,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -117,7 +117,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -161,7 +161,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh kill 123",
@@ -181,7 +181,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "pre_upgrade_restart",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
@@ -198,7 +198,7 @@ class TestStormUiServer(TestStormBase):
                      classname = "UiServer",
                      command = "pre_upgrade_restart",
                      config_dict = json_content,
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES,
                      call_mocks = [(0, None, ''), (0, None)],
                      mocks_dict = mocks_dict)
@@ -239,7 +239,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -263,7 +263,7 @@ class TestStormUiServer(TestStormBase):
                         classname = "UiServer",
                         command = "security_status",
                         config_file="secured.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -280,7 +280,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -290,7 +290,7 @@ class TestStormUiServer(TestStormBase):
                        classname = "UiServer",
                        command = "security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py b/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
index 399d1e2..1d127d2 100644
--- a/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.1/TEZ/test_service_check.py
@@ -30,7 +30,7 @@ class TestTezServiceCheck(RMFTestCase):
                        classname="TezServiceCheck",
                        command="service_check",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/sample-tez-test',
@@ -107,7 +107,7 @@ class TestTezServiceCheck(RMFTestCase):
                        classname="TezServiceCheck",
                        command="service_check",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py b/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
index 6243913..ab08776 100644
--- a/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
@@ -32,7 +32,7 @@ class TestTezClient(RMFTestCase):
                        classname = "TezClient",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -63,30 +63,30 @@ class TestTezClient(RMFTestCase):
 
     self.assertNoMoreResources()
 
-  @patch("resource_management.libraries.functions.get_hdp_version")
-  def test_upgrade(self, get_hdp_version_mock):
+  @patch("resource_management.libraries.functions.get_stack_version")
+  def test_upgrade(self, get_stack_version_mock):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
                        classname = "TezClient",
                        command = "restart",
                        config_file="client-upgrade.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    get_hdp_version_mock.return_value = "2.2.1.0-2067"
+    get_stack_version_mock.return_value = "2.2.1.0-2067"
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
 
-  @patch("resource_management.libraries.functions.get_hdp_version")
-  def test_upgrade_23(self, get_hdp_version_mock):
+  @patch("resource_management.libraries.functions.get_stack_version")
+  def test_upgrade_23(self, get_stack_version_mock):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
                        classname = "TezClient",
                        command = "restart",
                        config_file="client-upgrade.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
-    get_hdp_version_mock.return_value = "2.2.1.0-2067"
+    get_stack_version_mock.return_value = "2.2.1.0-2067"
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
 
     # for now, it's enough that hdp-select is confirmed
@@ -103,7 +103,7 @@ class TestTezClient(RMFTestCase):
                        classname = "TezClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)

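The rename also covers the helper these tests mock: resource_management.libraries.functions.get_hdp_version becomes get_stack_version, so every @patch target has to move in lockstep with the keyword change. A sketch of the updated method-level patch, following test_upgrade above (the mock import path is assumed; here the stub is set before executeScript, whereas the original sets it afterwards):

from mock.mock import patch  # import path assumed from other Ambari stack tests

@patch("resource_management.libraries.functions.get_stack_version")
def test_upgrade(self, get_stack_version_mock):
  # Stub the renamed helper, then drive a restart through the harness.
  get_stack_version_mock.return_value = "2.2.1.0-2067"
  self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/tez_client.py",
                     classname = "TezClient",
                     command = "restart",
                     config_file = "client-upgrade.json",
                     stack_version = self.STACK_VERSION,
                     target = RMFTestCase.TARGET_COMMON_SERVICES)
  self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
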
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
index 4af5f83..132924a 100644
--- a/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
+++ b/ambari-server/src/test/python/stacks/2.1/YARN/test_apptimelineserver.py
@@ -38,7 +38,7 @@ class TestAppTimelineServer(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -49,7 +49,7 @@ class TestAppTimelineServer(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -83,7 +83,7 @@ class TestAppTimelineServer(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -275,7 +275,7 @@ class TestAppTimelineServer(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -320,7 +320,7 @@ class TestAppTimelineServer(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -343,7 +343,7 @@ class TestAppTimelineServer(RMFTestCase):
                          classname="ApplicationTimelineServer",
                          command="security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -360,7 +360,7 @@ class TestAppTimelineServer(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -379,7 +379,7 @@ class TestAppTimelineServer(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -389,12 +389,12 @@ class TestAppTimelineServer(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
 
-  @patch.object(resource_management.libraries.functions, "get_hdp_version", new = MagicMock(return_value='2.3.0.0-1234'))
+  @patch.object(resource_management.libraries.functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
@@ -407,7 +407,7 @@ class TestAppTimelineServer(RMFTestCase):
                        classname = "ApplicationTimelineServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)

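Note that the function rename has to reach both mocking styles that appear in these tests, the string target and the object target; with mock's default create=False, a patch still aimed at get_hdp_version would fail with AttributeError once the library function is renamed. Both forms after the change, taken from the diffs above:

# String target, as in test_tez_client.py:
@patch("resource_management.libraries.functions.get_stack_version")

# Object target, as in test_apptimelineserver.py:
@patch.object(resource_management.libraries.functions, "get_stack_version",
              new = MagicMock(return_value='2.3.0.0-1234'))
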
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py b/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
index 4f75f62..e3430e5 100644
--- a/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/ACCUMULO/test_accumulo_client.py
@@ -41,7 +41,7 @@ class TestAccumuloClient(RMFTestCase):
       classname = "AccumuloClient",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'accumulo-client', version), sudo=True,)
@@ -62,7 +62,7 @@ class TestAccumuloClient(RMFTestCase):
       classname = "AccumuloClient",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, ''), (0, None)],
       mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py b/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
index f1e0466..117a4bc 100644
--- a/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
+++ b/ambari-server/src/test/python/stacks/2.2/KAFKA/test_kafka_broker.py
@@ -33,7 +33,7 @@ class TestKafkaBroker(RMFTestCase):
                          classname = "KafkaBroker",
                          command = "configure",
                          config_file="default.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/log/kafka',
@@ -80,7 +80,7 @@ class TestKafkaBroker(RMFTestCase):
                        classname = "KafkaBroker",
                        command = "configure",
                        config_file="default_custom_path_config.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -133,7 +133,7 @@ class TestKafkaBroker(RMFTestCase):
                        classname = "KafkaBroker",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'kafka-broker', version), sudo=True,)
@@ -152,7 +152,7 @@ class TestKafkaBroker(RMFTestCase):
                        classname = "KafkaBroker",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_client.py b/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_client.py
index 0592fc4..b3b7940 100644
--- a/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_client.py
@@ -38,7 +38,7 @@ class TestKerberosClient(RMFTestCase):
                        classname="KerberosClient",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -64,7 +64,7 @@ class TestKerberosClient(RMFTestCase):
                        classname="KerberosClient",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -91,7 +91,7 @@ class TestKerberosClient(RMFTestCase):
                        classname="KerberosClient",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -110,7 +110,7 @@ class TestKerberosClient(RMFTestCase):
                        classname="KerberosClient",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -136,7 +136,7 @@ class TestKerberosClient(RMFTestCase):
                        classname="KerberosClient",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -270,7 +270,7 @@ class TestKerberosClient(RMFTestCase):
                        classname="KerberosClient",
                        command="set_keytab",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -347,7 +347,7 @@ class TestKerberosClient(RMFTestCase):
                        classname="KerberosClient",
                        command="remove_keytab",
                        config_dict=json_data,
-                       hdp_stack_version=self.STACK_VERSION,
+                       stack_version=self.STACK_VERSION,
                        target=RMFTestCase.TARGET_COMMON_SERVICES
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_server.py b/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_server.py
index 4de232e..a11d596 100644
--- a/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_server.py
+++ b/ambari-server/src/test/python/stacks/2.2/KERBEROS/test_kerberos_server.py
@@ -36,7 +36,7 @@ class TestKerberosServer(RMFTestCase):
                        classname="KerberosServer",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -95,7 +95,7 @@ class TestKerberosServer(RMFTestCase):
                        classname="KerberosServer",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -154,7 +154,7 @@ class TestKerberosServer(RMFTestCase):
                        classname="KerberosServer",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -213,7 +213,7 @@ class TestKerberosServer(RMFTestCase):
                        classname="KerberosServer",
                        command="configure",
                        config_dict=json_data,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
index 7c3b921..fbc55ca 100644
--- a/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
+++ b/ambari-server/src/test/python/stacks/2.2/KNOX/test_knox_gateway.py
@@ -35,7 +35,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -158,7 +158,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -182,7 +182,7 @@ class TestKnoxGateway(RMFTestCase):
                          classname = "KnoxGateway",
                          command="security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -199,7 +199,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file and principal are not set."})
@@ -215,7 +215,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -225,7 +225,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command="security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -243,7 +243,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Execute', ('tar',
@@ -285,7 +285,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)
@@ -355,7 +355,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)
@@ -425,7 +425,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)
@@ -482,7 +482,7 @@ class TestKnoxGateway(RMFTestCase):
                        classname = "KnoxGateway",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py b/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
index bfc8f31..20224ab 100644
--- a/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.2/PIG/test_pig_service_check.py
@@ -33,7 +33,7 @@ class TestPigServiceCheck(RMFTestCase):
                        classname="PigServiceCheck",
                        command="service_check",
                        config_file="pig-service-check-secure.json",
-                       hdp_stack_version=self.STACK_VERSION,
+                       stack_version=self.STACK_VERSION,
                        target=RMFTestCase.TARGET_COMMON_SERVICES
     )
     

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
index 4c7792d..a5cc123 100644
--- a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
+++ b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_admin.py
@@ -33,7 +33,7 @@ class TestRangerAdmin(RMFTestCase):
                    classname = "RangerAdmin",
                    command = "configure",
                    config_file="ranger-admin-default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -44,7 +44,7 @@ class TestRangerAdmin(RMFTestCase):
                    classname = "RangerAdmin",
                    command = "start",
                    config_file="ranger-admin-default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -60,7 +60,7 @@ class TestRangerAdmin(RMFTestCase):
                    classname = "RangerAdmin",
                    command = "stop",
                    config_file="ranger-admin-default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-stop',
@@ -74,7 +74,7 @@ class TestRangerAdmin(RMFTestCase):
                    classname = "RangerAdmin",
                    command = "configure",
                    config_file="ranger-admin-secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -85,7 +85,7 @@ class TestRangerAdmin(RMFTestCase):
                    classname = "RangerAdmin",
                    command = "start",
                    config_file="ranger-admin-secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -101,7 +101,7 @@ class TestRangerAdmin(RMFTestCase):
                    classname = "RangerAdmin",
                    command = "stop",
                    config_file="ranger-admin-secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/bin/ranger-admin-stop',
@@ -206,7 +206,7 @@ class TestRangerAdmin(RMFTestCase):
                        classname = "RangerAdmin",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
index 2828983..67bbb9d 100644
--- a/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
+++ b/ambari-server/src/test/python/stacks/2.2/RANGER/test_ranger_usersync.py
@@ -34,7 +34,7 @@ class TestRangerUsersync(RMFTestCase):
                    classname = "RangerUsersync",
                    command = "configure",
                    config_file="ranger-admin-default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -45,7 +45,7 @@ class TestRangerUsersync(RMFTestCase):
                    classname = "RangerUsersync",
                    command = "start",
                    config_file="ranger-admin-default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -61,7 +61,7 @@ class TestRangerUsersync(RMFTestCase):
                    classname = "RangerUsersync",
                    command = "stop",
                    config_file="ranger-admin-default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', ('/usr/bin/ranger-usersync-stop',),
@@ -75,7 +75,7 @@ class TestRangerUsersync(RMFTestCase):
                    classname = "RangerUsersync",
                    command = "configure",
                    config_file="ranger-admin-secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -86,7 +86,7 @@ class TestRangerUsersync(RMFTestCase):
                    classname = "RangerUsersync",
                    command = "start",
                    config_file="ranger-admin-secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -102,7 +102,7 @@ class TestRangerUsersync(RMFTestCase):
                    classname = "RangerUsersync",
                    command = "stop",
                    config_file="ranger-admin-secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', ('/usr/bin/ranger-usersync-stop',),
@@ -117,7 +117,7 @@ class TestRangerUsersync(RMFTestCase):
                        classname = "RangerUsersync",
                        command = "restart",
                        config_file="ranger-usersync-upgrade.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertTrue(setup_usersync_mock.called)
@@ -139,7 +139,7 @@ class TestRangerUsersync(RMFTestCase):
                        classname = "RangerUsersync",
                        command = "restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py b/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
index 2cf6edb..85fc86b 100644
--- a/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SLIDER/test_slider_client.py
@@ -31,7 +31,7 @@ class TestSliderClient(RMFTestCase):
                        classname="SliderClient",
                        command="configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -81,7 +81,7 @@ class TestSliderClient(RMFTestCase):
                        classname="SliderServiceCheck",
                        command="service_check",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -99,7 +99,7 @@ class TestSliderClient(RMFTestCase):
                        classname="SliderServiceCheck",
                        command="service_check",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -117,7 +117,7 @@ class TestSliderClient(RMFTestCase):
                        classname = "SliderClient",
                        command = "pre_upgrade_restart",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'slider-client', '2.2.1.0-2067'), sudo=True)
@@ -136,7 +136,7 @@ class TestSliderClient(RMFTestCase):
                        classname = "SliderClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
index 73322a1..4c3a3d7 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_job_history_server.py
@@ -24,7 +24,7 @@ from stacks.utils.RMFTestCase import *
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 
 @not_for_platform(PLATFORM_WINDOWS)
-@patch("resource_management.libraries.functions.get_hdp_version", new=MagicMock(return_value="2.3.0.0-1597"))
+@patch("resource_management.libraries.functions.get_stack_version", new=MagicMock(return_value="2.3.0.0-1597"))
 class TestJobHistoryServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
   STACK_VERSION = "2.2"
@@ -36,7 +36,7 @@ class TestJobHistoryServer(RMFTestCase):
                    classname = "JobHistoryServer",
                    command = "configure",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -49,7 +49,7 @@ class TestJobHistoryServer(RMFTestCase):
                    classname = "JobHistoryServer",
                    command = "start",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -78,7 +78,7 @@ class TestJobHistoryServer(RMFTestCase):
                    classname = "JobHistoryServer",
                    command = "stop",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/hdp/current/spark-client/sbin/stop-history-server.sh',
@@ -95,7 +95,7 @@ class TestJobHistoryServer(RMFTestCase):
                    classname = "JobHistoryServer",
                    command = "configure",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -108,7 +108,7 @@ class TestJobHistoryServer(RMFTestCase):
                    classname = "JobHistoryServer",
                    command = "start",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -143,7 +143,7 @@ class TestJobHistoryServer(RMFTestCase):
                    classname = "JobHistoryServer",
                    command = "stop",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/hdp/current/spark-client/sbin/stop-history-server.sh',
@@ -312,7 +312,7 @@ class TestJobHistoryServer(RMFTestCase):
                        classname = "JobHistoryServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

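The Spark tests stub the helper once at class level, so a single decorator covers every test method in the module; after the rename it reads (verbatim from the diff above):

@not_for_platform(PLATFORM_WINDOWS)
@patch("resource_management.libraries.functions.get_stack_version", new=MagicMock(return_value="2.3.0.0-1597"))
class TestJobHistoryServer(RMFTestCase):
  COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
  STACK_VERSION = "2.2"

Because mock applies a class decorator to each test method individually, a stale get_hdp_version target here would make every test in the module fail, not just the upgrade-specific ones.
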
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
index c645b91..68b0f26 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_client.py
@@ -24,7 +24,7 @@ from stacks.utils.RMFTestCase import *
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 
 @not_for_platform(PLATFORM_WINDOWS)
-@patch("resource_management.libraries.functions.get_hdp_version", new=MagicMock(return_value="2.3.0.0-1597"))
+@patch("resource_management.libraries.functions.get_stack_version", new=MagicMock(return_value="2.3.0.0-1597"))
 class TestSparkClient(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
   STACK_VERSION = "2.2"
@@ -34,7 +34,7 @@ class TestSparkClient(RMFTestCase):
                    classname = "SparkClient",
                    command = "configure",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -45,7 +45,7 @@ class TestSparkClient(RMFTestCase):
                    classname = "SparkClient",
                    command = "configure",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -149,7 +149,7 @@ class TestSparkClient(RMFTestCase):
                        classname = "SparkClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_service_check.py b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_service_check.py
index 444bac7..0987f7c 100644
--- a/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.2/SPARK/test_spark_service_check.py
@@ -23,7 +23,7 @@ from stacks.utils.RMFTestCase import *
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 
 @not_for_platform(PLATFORM_WINDOWS)
-@patch("resource_management.libraries.functions.get_hdp_version", new=MagicMock(return_value="2.3.0.0-1597"))
+@patch("resource_management.libraries.functions.get_stack_version", new=MagicMock(return_value="2.3.0.0-1597"))
 class TestServiceCheck(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
   STACK_VERSION = "2.2"
@@ -33,7 +33,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="SparkServiceCheck",
                         command="service_check",
                         config_file="default.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "curl -s -o /dev/null -w'%{http_code}' --negotiate -u: -k http://localhost:18080 | grep 200",
@@ -49,7 +49,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="SparkServiceCheck",
                         command="service_check",
                         config_file="secured.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/spark.service.keytab spark/localhost@EXAMPLE.COM; ',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py b/ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py
index b4ef2bc..ab5236b 100644
--- a/ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py
+++ b/ambari-server/src/test/python/stacks/2.3/ATLAS/test_metadata_server.py
@@ -101,7 +101,7 @@ class TestMetadataServer(RMFTestCase):
                        classname = "MetadataServer",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -113,7 +113,7 @@ class TestMetadataServer(RMFTestCase):
                        classname = "MetadataServer",
                        command = "configure",
                        config_file="secure.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
                        )
 
@@ -128,7 +128,7 @@ class TestMetadataServer(RMFTestCase):
                        classname = "MetadataServer",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.configureResourcesCalled()
@@ -142,7 +142,7 @@ class TestMetadataServer(RMFTestCase):
                        classname = "MetadataServer",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'source /etc/atlas/conf/atlas-env.sh; /usr/hdp/current/atlas-server/bin/atlas_stop.py',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py
index 56fc5e4..18205f7 100644
--- a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py
+++ b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqmaster.py
@@ -118,7 +118,7 @@ class TestHawqMaster(RMFTestCase):
         classname = 'HawqMaster',
         command = 'configure',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -133,7 +133,7 @@ class TestHawqMaster(RMFTestCase):
         classname = 'HawqMaster',
         command = 'install',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -152,7 +152,7 @@ class TestHawqMaster(RMFTestCase):
         classname = 'HawqMaster',
         command = 'start',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -215,7 +215,7 @@ class TestHawqMaster(RMFTestCase):
         classname = 'HawqMaster',
         command = 'start',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -276,7 +276,7 @@ class TestHawqMaster(RMFTestCase):
         classname = 'HawqMaster',
         command = 'stop',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
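
For readers tracking the rename above: every Python test suite carries the same two changes, the executeScript keyword (hdp_stack_version -> stack_version) and the patched helper (get_hdp_version -> get_stack_version). A minimal sketch of the resulting test shape, with a hypothetical component, package dir, and fixture file standing in for the real ones:

from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *

@patch("resource_management.libraries.functions.get_stack_version", new=MagicMock(return_value="2.3.0.0-1597"))
class TestExampleDaemon(RMFTestCase):
  # hypothetical values; the real suites define their own
  COMMON_SERVICES_PACKAGE_DIR = "EXAMPLE/1.0.0/package"
  STACK_VERSION = "2.2"

  def test_start_default(self):
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/example_daemon.py",
                       classname = "ExampleDaemon",
                       command = "start",
                       config_file = "default.json",
                       stack_version = self.STACK_VERSION,  # renamed keyword
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )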
 


[25/51] [abbrv] ambari git commit: AMBARI-15322: Update description / labels for HAWQ alerts. (bhuvnesh2703 via mithmatt)

Posted by jl...@apache.org.
AMBARI-15322: Update description / labels for HAWQ alerts. (bhuvnesh2703 via mithmatt)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2763c309
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2763c309
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2763c309

Branch: refs/heads/AMBARI-13364
Commit: 2763c309d1d1304c192c596e96547528b8838faf
Parents: f7221e5
Author: Matt <mm...@pivotal.io>
Authored: Tue Mar 8 15:39:13 2016 -0800
Committer: Matt <mm...@pivotal.io>
Committed: Tue Mar 8 15:39:13 2016 -0800

----------------------------------------------------------------------
 .../resources/common-services/HAWQ/2.0.0/alerts.json  | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2763c309/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/alerts.json b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/alerts.json
index 4488cb5..8da5beb 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/alerts.json
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/alerts.json
@@ -30,8 +30,8 @@
     "HAWQMASTER": [
       {
         "name": "hawqstandby_sync_status",
-        "label": "HAWQ Standby Sync Status",
-        "description": "This alert will trigger if HAWQ Standby is not synchronized with HAWQ Master. Use the service action 'Re-Synchronize HAWQ Standby' on HAWQ service page to synchronize HAWQ Standby with HAWQ Master.",
+        "label": "HAWQ Standby Master Sync Status",
+        "description": "This alert will trigger if HAWQ Standby Master is not synchronized with HAWQ Master. Use the service action 'Re-Sync HAWQ Standby Master' on HAWQ service page to synchronize HAWQ Standby Master with HAWQ Master.",
         "interval": 1,
         "scope": "ANY",
         "enabled": true,
@@ -56,7 +56,7 @@
               "display_name": "Component Name",
               "value": "master",
               "type": "STRING",
-              "description": "This text string indicates if it is a Master, Standby or Segment"
+              "description": "This text string indicates if it is a HAWQ Master, HAWQ Standby Master or HAWQ Segment"
             }
           ]
         }
@@ -78,7 +78,7 @@
               "display_name": "Component Name",
               "value": "segment",
               "type": "STRING",
-              "description": "This text string indicates if it is a Master, Standby or Segment"
+              "description": "This text string indicates if it is a HAWQ Master, HAWQ Standby Master or HAWQ Segment"
             }
           ]
         }
@@ -87,8 +87,8 @@
     "HAWQSTANDBY": [
       {
         "name": "hawq_standby_process",
-        "label": "HAWQ Standby Process",
-        "description": "This alert is triggered if the HAWQ Standby process cannot be confirmed to be up and listening on the network.",
+        "label": "HAWQ Standby Master Process",
+        "description": "This alert is triggered if the HAWQ Standby Master process cannot be confirmed to be up and listening on the network.",
         "interval": 1,
         "scope": "ANY",
         "source": {
@@ -100,7 +100,7 @@
               "display_name": "Component Name",
               "value": "standby",
               "type": "STRING",
-              "description": "This text string indicates if it is a Master, Standby or Segment"
+              "description": "This text string indicates if it is a HAWQ Master, HAWQ Standby Master or HAWQ Segment"
             }
           ]
         }


[27/51] [abbrv] ambari git commit: AMBARI-15342. AMS Grafana start failed with permission denied error on changing user. (swagle)

Posted by jl...@apache.org.
AMBARI-15342. AMS Grafana start failed with permission denied error on changing user. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/249014b3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/249014b3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/249014b3

Branch: refs/heads/AMBARI-13364
Commit: 249014b373a0a2fc6757f8bebc5bfb1e7929c166
Parents: 2c2c201
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Tue Mar 8 17:54:07 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Tue Mar 8 17:54:07 2016 -0800

----------------------------------------------------------------------
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py         |  5 +++++
 .../stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py | 12 ++++++++++++
 2 files changed, 17 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/249014b3/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
index c8c3b6d..cbdcf45 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/ams.py
@@ -420,6 +420,11 @@ def ams(name=None):
          content=InlineTemplate(params.ams_grafana_ini_template)
          )
 
+    for dir in ams_grafana_directories:
+      Execute(('chown', '-R', params.ams_user, dir),
+              sudo=True
+              )
+
     if params.metric_collector_https_enabled:
       export_ca_certs(params.ams_grafana_conf_dir)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/249014b3/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py
index eab50d4..d54acc7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py
@@ -49,6 +49,18 @@ class TestMetricsGrafana(RMFTestCase):
                        )
     self.maxDiff=None
     self.assert_configure()
+    self.assertResourceCalled('Execute', ('chown', u'-R', u'ams', '/etc/ambari-metrics-grafana/conf'),
+                              sudo = True
+                              )
+    self.assertResourceCalled('Execute', ('chown', u'-R', u'ams', '/var/log/ambari-metrics-grafana'),
+                              sudo = True
+                              )
+    self.assertResourceCalled('Execute', ('chown', u'-R', u'ams', '/var/lib/ambari-metrics-grafana'),
+                              sudo = True
+                              )
+    self.assertResourceCalled('Execute', ('chown', u'-R', u'ams', '/var/run/ambari-metrics-grafana'),
+                              sudo = True
+                              )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh /usr/jdk64/jdk1.7.0_45/bin/keytool -importkeystore -srckeystore /etc/security/clientKeys/all.jks -destkeystore /some_tmp_dir/truststore.p12 -deststoretype PKCS12 -srcstorepass bigdata -deststorepass bigdata',
                               )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh openssl pkcs12 -in /some_tmp_dir/truststore.p12 -out /etc/ambari-metrics-grafana/conf/ca.pem -cacerts -nokeys -passin pass:bigdata',
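
The fix itself is the small loop added to ams.py: before Grafana starts, each of its directories is re-chowned through ambari-sudo, so changing the service user in ams-env does not leave root-owned conf, log, data, or pid paths behind. A minimal standalone sketch of that pattern, with the user and directory list inlined from the test expectations above (the real script takes both from params, and the Execute resources run inside the agent's resource environment):

from resource_management.core.resources.system import Execute

ams_user = "ams"  # params.ams_user in the real script
ams_grafana_directories = [
  "/etc/ambari-metrics-grafana/conf",
  "/var/log/ambari-metrics-grafana",
  "/var/lib/ambari-metrics-grafana",
  "/var/run/ambari-metrics-grafana",
]

for dir in ams_grafana_directories:
  # recursive ownership fix via ambari-sudo, matching the assertions above
  Execute(('chown', '-R', ams_user, dir), sudo=True)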


[30/51] [abbrv] ambari git commit: AMBARI-15295. HBase RS decommission issue in secure cluster. (vbrodetskyi)

Posted by jl...@apache.org.
AMBARI-15295. HBase RS decommission issue in secure cluster. (vbrodetskyi)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/112d3857
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/112d3857
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/112d3857

Branch: refs/heads/AMBARI-13364
Commit: 112d3857edd8c0d2719a995a6ca65bc97cc5fbfd
Parents: 5e69da9
Author: Vitaly Brodetskyi <vb...@hortonworks.com>
Authored: Wed Mar 9 13:30:34 2016 +0200
Committer: Vitaly Brodetskyi <vb...@hortonworks.com>
Committed: Wed Mar 9 13:30:34 2016 +0200

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/hbase_decommission.py  |  6 +++---
 .../HBASE/0.96.0.2.0/package/scripts/params_linux.py  |  2 ++
 .../python/stacks/2.0.6/HBASE/test_hbase_master.py    | 14 +++++++-------
 3 files changed, 12 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/112d3857/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
index ca7e1ef..022465a 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_decommission.py
@@ -63,7 +63,7 @@ def hbase_decommission(env):
     for host in hosts:
       if host:
         regiondrainer_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} remove {host}")
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} remove {host}")
         Execute(regiondrainer_cmd,
                 user=params.hbase_user,
                 logoutput=True
@@ -75,9 +75,9 @@ def hbase_decommission(env):
     for host in hosts:
       if host:
         regiondrainer_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_drainer} add {host}")
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} add {host}")
         regionmover_cmd = format(
-          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} org.jruby.Main {region_mover} unload {host}")
+          "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_mover} unload {host}")
 
         Execute(regiondrainer_cmd,
                 user=params.hbase_user,

http://git-wip-us.apache.org/repos/asf/ambari/blob/112d3857/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 01503fe..63e548a 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -194,9 +194,11 @@ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executab
 if security_enabled:
   kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_principal_name};")
   kinit_cmd_master = format("{kinit_path_local} -kt {master_keytab_path} {master_jaas_princ};")
+  master_security_config = format("-Djava.security.auth.login.config={hbase_conf_dir}/hbase_master_jaas.conf")
 else:
   kinit_cmd = ""
   kinit_cmd_master = ""
+  master_security_config = ""
 
 #log4j.properties
 if (('hbase-log4j' in config['configurations']) and ('content' in config['configurations']['hbase-log4j'])):

http://git-wip-us.apache.org/repos/asf/ambari/blob/112d3857/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 3045efa..389ae74 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -130,19 +130,19 @@ class TestHBaseMaster(RMFTestCase):
                               content = StaticFile('draining_servers.rb'),
                               mode = 0755,
                               )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf  org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
                               logoutput = True,
                               user = 'hbase',
                               )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf  org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
                               logoutput = True,
                               user = 'hbase',
                               )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host2',
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf  org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host2',
                               logoutput = True,
                               user = 'hbase',
                               )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host2',
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf  org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host2',
                               logoutput = True,
                               user = 'hbase',
                               )
@@ -161,7 +161,7 @@ class TestHBaseMaster(RMFTestCase):
                               content = StaticFile('draining_servers.rb'),
                               mode = 0755,
                               )
-    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb remove host1',
+    self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf  org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb remove host1',
                               logoutput = True,
                               user = 'hbase',
                               )
@@ -229,11 +229,11 @@ class TestHBaseMaster(RMFTestCase):
                               content = StaticFile('draining_servers.rb'),
                               mode = 0755,
                               )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.service.keytab hbase/c6401.ambari.apache.org@EXAMPLE.COM; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.service.keytab hbase/c6401.ambari.apache.org@EXAMPLE.COM; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf -Djava.security.auth.login.config=/etc/hbase/conf/hbase_master_jaas.conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
                               logoutput = True,
                               user = 'hbase',
                               )
-    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.service.keytab hbase/c6401.ambari.apache.org@EXAMPLE.COM; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
+    self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.service.keytab hbase/c6401.ambari.apache.org@EXAMPLE.COM; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf -Djava.security.auth.login.config=/etc/hbase/conf/hbase_master_jaas.conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
                               logoutput = True,
                               user = 'hbase',
                               )
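
The doubled space in the updated non-secure expectations is a direct consequence of how the two files combine: params_linux.py sets master_security_config to a JAAS -D option when security is enabled and to an empty string otherwise, and hbase_decommission.py substitutes it unconditionally into the drainer/mover command template. A hedged sketch of that interplay, with illustrative values in place of the real cluster configuration:

from resource_management.libraries.functions.format import format

security_enabled = False  # illustrative; read from cluster config
hbase_conf_dir = "/etc/hbase/conf"
hbase_cmd = "/usr/lib/hbase/bin/hbase"
region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
kinit_cmd = ""            # a kinit invocation when security is enabled
host = "host1"

if security_enabled:
  master_security_config = format("-Djava.security.auth.login.config={hbase_conf_dir}/hbase_master_jaas.conf")
else:
  master_security_config = ""  # empty value leaves the double space seen in the tests

regiondrainer_cmd = format(
  "{kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} {master_security_config} org.jruby.Main {region_drainer} add {host}")
# security off: ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf  org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1'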


[33/51] [abbrv] ambari git commit: AMBARI-15285: AMS quicklink directory name is wrong, causing errors in ambari server start (dili)

Posted by jl...@apache.org.
AMBARI-15285: AMS quicklink directory name is wrong, causing errors in ambari server start (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/354d0793
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/354d0793
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/354d0793

Branch: refs/heads/AMBARI-13364
Commit: 354d07933f7a5ad27ff2e7a93d802fd8d10798e8
Parents: dddffd5
Author: Di Li <di...@apache.org>
Authored: Wed Mar 9 10:50:15 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Wed Mar 9 10:50:15 2016 -0500

----------------------------------------------------------------------
 .../0.1.0/quickLinks/quicklinks.json            | 34 --------------------
 .../0.1.0/quicklinks/quicklinks.json            | 34 ++++++++++++++++++++
 2 files changed, 34 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/354d0793/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quickLinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quickLinks/quicklinks.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quickLinks/quicklinks.json
deleted file mode 100644
index 1c276c0..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quickLinks/quicklinks.json
+++ /dev/null
@@ -1,34 +0,0 @@
-{
-  "name": "default",
-  "description": "default quick links configuration",
-  "configuration": {
-    "protocol":
-    {
-      "type":"https",
-      "checks":[
-        {
-          "property":"protocol",
-          "desired":"https",
-          "site":"ams-grafana-ini"
-        }
-      ]
-    },
-    "links": [
-      {
-        "name": "metrics_ui_server",
-        "label": "Grafana",
-        "requires_user_name": "false",
-        "url":"%@://%@:%@",
-        "template":"%@://%@:%@",
-        "port":{
-          "http_property": "port",
-          "http_default_port": "3000",
-          "https_property": "port",
-          "https_default_port": "3000",
-          "regex": "^(\\d+)$",
-          "site": "ams-grafana-ini"
-        }
-      }
-    ]
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/354d0793/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quicklinks/quicklinks.json
new file mode 100644
index 0000000..1c276c0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/quicklinks/quicklinks.json
@@ -0,0 +1,34 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol":
+    {
+      "type":"https",
+      "checks":[
+        {
+          "property":"protocol",
+          "desired":"https",
+          "site":"ams-grafana-ini"
+        }
+      ]
+    },
+    "links": [
+      {
+        "name": "metrics_ui_server",
+        "label": "Grafana",
+        "requires_user_name": "false",
+        "url":"%@://%@:%@",
+        "template":"%@://%@:%@",
+        "port":{
+          "http_property": "port",
+          "http_default_port": "3000",
+          "https_property": "port",
+          "https_default_port": "3000",
+          "regex": "^(\\d+)$",
+          "site": "ams-grafana-ini"
+        }
+      }
+    ]
+  }
+}


[46/51] [abbrv] ambari git commit: AMBARI-14451: Parameterize distro-specific stack information for HDFS (Juanjo Marron via dili)

Posted by jl...@apache.org.
AMBARI-14451: Parameterize distro-specific stack information for HDFS (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/4788dc27
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/4788dc27
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/4788dc27

Branch: refs/heads/AMBARI-13364
Commit: 4788dc27841c9416b4b31b0127ad9874711f7d9a
Parents: e5df414
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:26:13 2016 -0500
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 15:15:13 2016 -0800

----------------------------------------------------------------------
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml     |  3 +--
 .../package/alerts/alert_ha_namenode_health.py      |  6 +++---
 .../package/alerts/alert_metrics_deviation.py       |  6 +++---
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py      |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/hdfs_client.py   |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/journalnode.py   |  5 +++--
 .../package/scripts/journalnode_upgrade.py          |  2 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py      |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/nfsgateway.py    |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/params.py        |  1 +
 .../HDFS/2.1.0.2.0/package/scripts/params_linux.py  | 12 ++++++++----
 .../2.1.0.2.0/package/scripts/setup_ranger_hdfs.py  |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py     |  5 +++--
 .../HDFS/2.1.0.2.0/package/scripts/utils.py         | 16 ++++++++--------
 .../stacks/HDP/2.0.6/configuration/cluster-env.xml  | 15 +++++++++++++++
 15 files changed, 61 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index 61eccce..61d503f 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
@@ -229,8 +229,7 @@ export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
 #TODO: if env var set that can cause problems
 export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
 
-{# this is different for HDP1 #}
-# Path to jsvc required by secure HDP 2.0 datanode
+# Path to jsvc required by secure datanode
 export JSVC_HOME={{jsvc_path}}
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
index 70b1970..20d1717 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
@@ -247,7 +247,7 @@ def get_jmx(query, connection_timeout):
 
 def _get_ha_state_from_json(string_json):
   """
-  Searches through the specified JSON string looking for either the HDP 2.0 or 2.1+ HA state
+  Searches through the specified JSON string looking for HA state
   enumerations.
   :param string_json: the string JSON
   :return:  the value of the HA state (active, standby, etc)
@@ -255,7 +255,7 @@ def _get_ha_state_from_json(string_json):
   json_data = json.loads(string_json)
   jmx_beans = json_data["beans"]
 
-  # look for HDP 2.1+ first
+  # look for NameNodeStatus-State first
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue
@@ -264,7 +264,7 @@ def _get_ha_state_from_json(string_json):
     if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
       return jmx_bean["State"]
 
-  # look for HDP 2.0 last
+  # look for FSNamesystem-tag.HAState last
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
index f6a9a56..50a9ecd 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
@@ -338,7 +338,7 @@ def get_jmx(query, connection_timeout):
 
 def _get_ha_state_from_json(string_json):
   """
-  Searches through the specified JSON string looking for either the HDP 2.0 or 2.1+ HA state
+  Searches through the specified JSON string looking for HA state
   enumerations.
   :param string_json: the string JSON
   :return:  the value of the HA state (active, standby, etc)
@@ -346,7 +346,7 @@ def _get_ha_state_from_json(string_json):
   json_data = json.loads(string_json)
   jmx_beans = json_data["beans"]
 
-  # look for HDP 2.1+ first
+  # look for NameNodeStatus-State first
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue
@@ -355,7 +355,7 @@ def _get_ha_state_from_json(string_json):
     if jmx_bean_name == "Hadoop:service=NameNode,name=NameNodeStatus" and "State" in jmx_bean:
       return jmx_bean["State"]
 
-  # look for HDP 2.0 last
+  # look for FSNamesystem-tag.HAState last
   for jmx_bean in jmx_beans:
     if "name" not in jmx_bean:
       continue

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index 3cdfda9..e3556ff 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -32,7 +32,8 @@ from utils import get_hdfs_binary
 class DataNode(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-datanode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-datanode"}
 
   def get_hdfs_binary(self):
     """
@@ -87,7 +88,7 @@ class DataNodeDefault(DataNode):
     Logger.info("Executing DataNode Stack Upgrade pre-restart")
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-datanode", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index c5ae35e..a56d480 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -55,12 +55,13 @@ class HdfsClient(Script):
 class HdfsClientDefault(HdfsClient):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-client"}
+    import params
+    return {params.stack_name: "hadoop-client"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
index 6f26b40..0860211 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
@@ -43,14 +43,15 @@ class JournalNode(Script):
 class JournalNodeDefault(JournalNode):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-journalnode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-journalnode"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-journalnode", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
index d598840..193e7d7 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode_upgrade.py
@@ -81,7 +81,7 @@ def hdfs_roll_edits():
   """
   import params
 
-  # TODO, this will be to be doc'ed since existing HDP 2.2 clusters will needs HDFS_CLIENT on all JOURNALNODE hosts
+  # TODO: this will need to be documented, since existing stack_version_ru_support clusters will need HDFS_CLIENT on all JOURNALNODE hosts
   dfsadmin_base_command = get_dfsadmin_base_command('hdfs')
   command = dfsadmin_base_command + ' -rollEdits'
   Execute(command, user=params.hdfs_user, tries=1)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 02905ec..2636bbc 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -68,7 +68,8 @@ except ImportError:
 class NameNode(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-namenode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-namenode"}
 
   def get_hdfs_binary(self):
     """
@@ -190,7 +191,7 @@ class NameNodeDefault(NameNode):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
       # Therefore, we cannot call this code in that scenario.
       call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")]

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
index c705fca..6386ca5 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
@@ -32,7 +32,8 @@ from resource_management.libraries.functions.version import compare_versions, fo
 class NFSGateway(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-nfs3"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-nfs3"}
 
   def install(self, env):
     import params
@@ -45,7 +46,7 @@ class NFSGateway(Script):
     import params
     env.set_params(params)
 
-    if Script.is_stack_greater_or_equal('2.3.0.0'):
+    if Script.is_stack_greater_or_equal(params.stack_version_nfs_support):
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-nfs3", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
index 7514918..0f3746c 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params.py
@@ -25,5 +25,6 @@ else:
   from params_linux import *
 
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
+stack_version_nfs_support = config['configurations']['cluster-env']['stack_version_nfs_support']
 nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
 retryAble = default("/commandParams/command_retry_enabled", False)

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index f0bf4d2..9d4e3f7 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -45,9 +45,13 @@ config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
 stack_name = default("/hostLevelParams/stack_name", None)
+stack_dir = config['configurations']['cluster-env']['stack_dir']
 upgrade_direction = default("/commandParams/upgrade_direction", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+stack_version_snappy_unsupport = config['configurations']['cluster-env']['stack_version_snappy_unsupport']
+stack_version_ranger_support = config['configurations']['cluster-env']['stack_version_ranger_support']
 agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
 agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
@@ -86,9 +90,9 @@ hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
 hadoop_lib_home = stack_select.get_hadoop_dir("lib")
 
-# hadoop parameters for 2.2+
-if Script.is_stack_greater_or_equal("2.2"):
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
+# hadoop parameters for stack_version_ru_support+
+if Script.is_stack_greater_or_equal(stack_version_ru_support):
+  mapreduce_libs_path = format("{stack_dir}/current/hadoop-mapreduce-client/*")
 
   if not security_enabled:
     hadoop_secure_dn_user = '""'
@@ -114,7 +118,7 @@ limits_conf_dir = "/etc/security/limits.d"
 hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
 hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
 
-create_lib_snappy_symlinks = not Script.is_stack_greater_or_equal("2.2")
+create_lib_snappy_symlinks = not Script.is_stack_greater_or_equal(stack_version_snappy_unsupport)
 jsvc_path = "/usr/lib/bigtop-utils"
 
 execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
index 209ac91..e30ff95 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -60,9 +60,10 @@ def setup_ranger_hdfs(upgrade_type=None):
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
                         stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
 
+
     if stack_version and params.upgrade_direction == Direction.UPGRADE:
-      # when upgrading to 2.3+, this env file must be removed
-      if compare_versions(stack_version, '2.3', format=True) > 0:
+      # when upgrading to stack_version_ranger_support+, this env file must be removed
+      if compare_versions(stack_version, params.stack_version_ranger_support, format=True) > 0:
         source_file = os.path.join(params.hadoop_conf_dir, 'set-hdfs-plugin-env.sh')
         target_file = source_file + ".bak"
         Execute(("mv", source_file, target_file), sudo=True, only_if=format("test -f {source_file}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index f96ac01..c19f3ce 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -64,14 +64,15 @@ class SNameNode(Script):
 class SNameNodeDefault(SNameNode):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-hdfs-secondarynamenode"}
+    import params
+    return {params.stack_name: "hadoop-hdfs-secondarynamenode"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index c626028..d30eb50 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -134,7 +134,7 @@ def kill_zkfc(zkfc_user):
   """
   There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
   Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
-  Option 2. Silent failover (not supported as of HDP 2.2.0.0)
+  Option 2. Silent failover (not supported as of stack_version_ru_support)
   :param zkfc_user: User that started the ZKFC process.
   :return: Return True if ZKFC was killed, otherwise, false.
   """
@@ -224,12 +224,12 @@ def service(action=None, name=None, user=None, options="", create_pid_dir=False,
     hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
 
     # At Champlain stack and further, we may start datanode as a non-root even in secure cluster
-    if not (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0) or params.secure_dn_ports_are_in_use:
+    if not (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0) or params.secure_dn_ports_are_in_use:
       user = "root"
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
 
-    if action == 'stop' and (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0) and \
+    if action == 'stop' and (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0) and \
       os.path.isfile(hadoop_secure_dn_pid_file):
         # We need special handling for this case to handle the situation
         # when we configure non-root secure DN and then restart it
@@ -351,11 +351,11 @@ def get_hdfs_binary(distro_component_name):
   """
   import params
   hdfs_binary = "hdfs"
-  if params.stack_name == "HDP":
-    # This was used in HDP 2.1 and earlier
-    hdfs_binary = "hdfs"
-    if Script.is_stack_greater_or_equal("2.2"):
-      hdfs_binary = "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)
+  #if params.stack_name == "HDP":
+  #  # This was used in HDP 2.1 and earlier
+  #  hdfs_binary = "hdfs"
+  if Script.is_stack_greater_or_equal(params.stack_version_ru_support):
+    hdfs_binary = "{0}/current/{1}/bin/hdfs".format(params.stack_dir, distro_component_name)
 
   return hdfs_binary
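
The effect of the rewritten get_hdfs_binary is easiest to see with concrete values; a small worked example, assuming stack_dir resolves to /usr/hdp as in the literal it replaces:

stack_dir = "/usr/hdp"  # cluster-env stack_dir
distro_component_name = "hadoop-hdfs-namenode"
hdfs_binary = "{0}/current/{1}/bin/hdfs".format(stack_dir, distro_component_name)
# -> "/usr/hdp/current/hadoop-hdfs-namenode/bin/hdfs": the same path as before
#    on HDP, but now driven by configuration for other distros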
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/4788dc27/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 8ac1b5b..70a5fbb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -32,6 +32,21 @@
         <description>Stack version from which rolling upgrade is supported and installation layout changed</description>
     </property>
     <property>
+        <name>stack_version_ranger_support</name>
+        <value>2.2.0.0</value>
+        <description>Stack version from which ranger is supported</description>
+    </property>
+    <property>
+        <name>stack_version_snappy_unsupport</name>
+        <value>2.2.0.0</value>
+        <description>Stack version from which snappy is not supported</description>
+    </property>
+    <property>
+        <name>stack_version_nfs_support</name>
+        <value>2.3.0.0</value>
+        <description>Stack version from which hadoop-hdfs-nfs3 is supported</description>
+    </property>
+    <property>
         <name>security_enabled</name>
         <value>false</value>
         <description>Hadoop Security</description>
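
Taken together, the pattern this commit introduces is: declare each version threshold once in cluster-env.xml, read it in the params module, and gate version-dependent logic on it instead of on a hard-coded literal. A minimal sketch of the consuming side, assuming it runs inside an Ambari command context (outside one, Script.get_config() has no command JSON to return):

from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.version import compare_versions, format_stack_version

config = Script.get_config()
cluster_env = config['configurations']['cluster-env']

# thresholds added to cluster-env.xml by this commit
stack_version_ru_support = cluster_env['stack_version_ru_support']    # "2.2.0.0"
stack_version_nfs_support = cluster_env['stack_version_nfs_support']  # "2.3.0.0"

version = "2.3.0.0-1597"  # hypothetical upgrade target, for illustration
if version and compare_versions(format_stack_version(version), stack_version_ru_support) >= 0:
  # conf_select.select(...) / stack_select.select(...) would run here,
  # exactly as in the pre_upgrade_restart hunks above
  pass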


[14/51] [abbrv] ambari git commit: AMBARI-15215: Update warning messages for HAWQ and PXF in stack advisor (mithmatt)

Posted by jl...@apache.org.
AMBARI-15215: Update warning messages for HAWQ and PXF in stack advisor (mithmatt)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/8615cac3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/8615cac3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/8615cac3

Branch: refs/heads/AMBARI-13364
Commit: 8615cac3a9cee7edfee18e0c8c6fd0da021f8d2e
Parents: 86d3780
Author: Matt <mm...@pivotal.io>
Authored: Tue Mar 8 11:06:34 2016 -0800
Committer: Matt <mm...@pivotal.io>
Committed: Tue Mar 8 11:06:34 2016 -0800

----------------------------------------------------------------------
 .../stacks/HDP/2.3/services/stack_advisor.py    | 18 +++++------
 .../stacks/2.3/common/test_stack_advisor.py     | 34 ++++++++++++++++----
 2 files changed, 37 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/8615cac3/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
index 53b46be..8e1aff2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/services/stack_advisor.py
@@ -83,15 +83,15 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
         childItems.append( { "type": 'host-component', "level": 'ERROR', "message": message, "component-name": 'HAWQSTANDBY', "host": hawqStandbyHosts[0] } )
 
       if len(hawqMasterHosts) ==  1 and hostsCount > 1 and self.isLocalHost(hawqMasterHosts[0]):
-        message = "HAWQ Master and Ambari Server should not be deployed on the same host. " \
-                  "If you leave them colocated, make sure to set HAWQ Master Port property " \
-                  "to a value different from the port number used by Ambari Server database."
+        message = "The default Postgres port (5432) on the Ambari Server conflicts with the default HAWQ Masters port. " \
+                  "If you are using port 5432 for Postgres, you must either deploy the HAWQ Master on a different host " \
+                  "or configure a different port for the HAWQ Masters in the HAWQ Configuration page."
         childItems.append( { "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'HAWQMASTER', "host": hawqMasterHosts[0] } )
 
       if len(hawqStandbyHosts) ==  1 and hostsCount > 1 and self.isLocalHost(hawqStandbyHosts[0]):
-        message = "HAWQ Standby Master and Ambari Server should not be deployed on the same host. " \
-                  "If you leave them colocated, make sure to set HAWQ Master Port property " \
-                  "to a value different from the port number used by Ambari Server database."
+        message = "The default Postgres port (5432) on the Ambari Server conflicts with the default HAWQ Masters port. " \
+                  "If you are using port 5432 for Postgres, you must either deploy the HAWQ Standby Master on a different host " \
+                  "or configure a different port for the HAWQ Masters in the HAWQ Configuration page."
         childItems.append( { "type": 'host-component', "level": 'WARN', "message": message, "component-name": 'HAWQSTANDBY', "host": hawqStandbyHosts[0] } )
 
     if "PXF" in servicesList:
@@ -934,9 +934,9 @@ class HDP23StackAdvisor(HDP22StackAdvisor):
       prop_name = 'hawq_master_address_port'
       validationItems.append({"config-name": prop_name,
                                 "item": self.getWarnItem(
-                                "The default Postgres port (5432) on the Ambari Server conflicts with the default HAWQ Master port. "
-                                "If you are using port 5432 for Postgres, you must either deploy the HAWQ Master on a different host "
-                                "or configure a different port for the HAWQ Master in the HAWQ Configuration page.")})
+                                "The default Postgres port (5432) on the Ambari Server conflicts with the default HAWQ Masters port. "
+                                "If you are using port 5432 for Postgres, you must either deploy the HAWQ Masters on a different host "
+                                "or configure a different port for the HAWQ Masters in the HAWQ Configuration page.")})
 
     # 2. Check if any data directories are pointing to root dir '/'
     directories = {

http://git-wip-us.apache.org/repos/asf/ambari/blob/8615cac3/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index cf1f606..1a3fa65 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -451,7 +451,13 @@ class TestHDP23StackAdvisor(TestCase):
 
     validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
     self.assertEquals(len(validations), 1)
-    expected={'component-name': 'HAWQSTANDBY', 'message': 'HAWQ Master and HAWQ Standby Master cannot be deployed on the same host.', 'type': 'host-component', 'host': 'c6403.ambari.apache.org', 'level': 'ERROR'}
+    expected = {
+      'component-name': 'HAWQSTANDBY',
+      'message': 'HAWQ Master and HAWQ Standby Master cannot be deployed on the same host.',
+      'type': 'host-component',
+      'host': 'c6403.ambari.apache.org',
+      'level': 'ERROR'
+    }
     self.assertEquals(validations[0], expected)
 
     # case-3: HAWQ Master and Ambari Server are collocated
@@ -467,7 +473,15 @@ class TestHDP23StackAdvisor(TestCase):
 
     validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
     self.assertEquals(len(validations), 1)
-    expected={'component-name': 'HAWQMASTER', 'message': 'HAWQ Master and Ambari Server should not be deployed on the same host. If you leave them colocated, make sure to set HAWQ Master Port property to a value different from the port number used by Ambari Server database.', 'type': 'host-component', 'host': 'c6401.ambari.apache.org', 'level': 'WARN'}
+    expected = {
+      'component-name': 'HAWQMASTER',
+      'message': 'The default Postgres port (5432) on the Ambari Server conflicts with the default HAWQ Masters port. ' +
+                 'If you are using port 5432 for Postgres, you must either deploy the HAWQ Master on a different host ' +
+                 'or configure a different port for the HAWQ Masters in the HAWQ Configuration page.',
+      'type': 'host-component',
+      'host': 'c6401.ambari.apache.org',
+      'level': 'WARN'
+    }
     self.assertEquals(validations[0], expected)
 
     # case-4: HAWQ Standby and Ambari Server are collocated
@@ -483,7 +497,15 @@ class TestHDP23StackAdvisor(TestCase):
 
     validations = self.stackAdvisor.getComponentLayoutValidations(services, hosts)
     self.assertEquals(len(validations), 1)
-    expected={'component-name': 'HAWQSTANDBY', 'message': 'HAWQ Standby Master and Ambari Server should not be deployed on the same host. If you leave them colocated, make sure to set HAWQ Master Port property to a value different from the port number used by Ambari Server database.', 'type': 'host-component', 'host': 'c6401.ambari.apache.org', 'level': 'WARN'}
+    expected = {
+      'component-name': 'HAWQSTANDBY',
+      'message': 'The default Postgres port (5432) on the Ambari Server conflicts with the default HAWQ Masters port. ' +
+                 'If you are using port 5432 for Postgres, you must either deploy the HAWQ Standby Master on a different host ' +
+                 'or configure a different port for the HAWQ Masters in the HAWQ Configuration page.',
+      'type': 'host-component',
+      'host': 'c6401.ambari.apache.org',
+      'level': 'WARN'
+    }
     self.assertEquals(validations[0], expected)
 
 
@@ -1855,9 +1877,9 @@ class TestHDP23StackAdvisor(TestCase):
       "config-name": "hawq_master_address_port",
       "config-type": "hawq-site",
       "level": "WARN",
-      "message": "The default Postgres port (5432) on the Ambari Server conflicts with the default HAWQ Master port. "
-                 "If you are using port 5432 for Postgres, you must either deploy the HAWQ Master on a different host "
-                 "or configure a different port for the HAWQ Master in the HAWQ Configuration page.",
+      "message": "The default Postgres port (5432) on the Ambari Server conflicts with the default HAWQ Masters port. "
+                 "If you are using port 5432 for Postgres, you must either deploy the HAWQ Masters on a different host "
+                 "or configure a different port for the HAWQ Masters in the HAWQ Configuration page.",
       "type": "configuration"}
     self.assertEqual(problems[0], expected)
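
All four updated warnings share one dict shape, which is what these tests pin
down. A condensed sketch of the host-component validation item the stack
advisor emits; the message is abbreviated and the host name is the hypothetical
one used by the tests:

    message = ("The default Postgres port (5432) on the Ambari Server conflicts "
               "with the default HAWQ Masters port. ...")

    item = {
      "type": "host-component",   # layout check, as opposed to "configuration"
      "level": "WARN",            # WARN advises the user; ERROR blocks the layout
      "message": message,
      "component-name": "HAWQMASTER",
      "host": "c6401.ambari.apache.org"
    }

Because these items stay at WARN, a colocated HAWQ Master and Ambari Server can
still be deployed; the message only flags the potential port 5432 conflict.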
 


[43/51] [abbrv] ambari git commit: AMBARI-15361. Fix ordering of Alter table calls which could result in a Region Close issue. (swagle)

Posted by jl...@apache.org.
AMBARI-15361. Fix ordering of Alter table calls which could result in a Region Close issue. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6d3e2912
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6d3e2912
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6d3e2912

Branch: refs/heads/AMBARI-13364
Commit: 6d3e2912d49a5343bfdef54a04add9210ba16657
Parents: f7711af
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Mar 9 14:00:01 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Mar 9 14:00:01 2016 -0800

----------------------------------------------------------------------
 .../timeline/HBaseTimelineMetricStore.java      |   7 +-
 .../metrics/timeline/PhoenixHBaseAccessor.java  | 134 ++++++++++++-------
 2 files changed, 94 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6d3e2912/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
index f460292..465fe95 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/HBaseTimelineMetricStore.java
@@ -80,12 +80,15 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
                                                 Configuration metricsConf) {
     if (!isInitialized) {
       hBaseAccessor = new PhoenixHBaseAccessor(hbaseConf, metricsConf);
+      // Initialize schema
       hBaseAccessor.initMetricSchema();
       // Initialize metadata from store
       metricMetadataManager = new TimelineMetricMetadataManager(hBaseAccessor, metricsConf);
       metricMetadataManager.initializeMetadata();
-
+      // Initialize policies before TTL update
       hBaseAccessor.initPolicies();
+      // Alter TTL on tables
+      hBaseAccessor.alterMetricTableTTL();
 
       if (Boolean.parseBoolean(metricsConf.get(USE_GROUPBY_AGGREGATOR_QUERIES, "true"))) {
         LOG.info("Using group by aggregators for aggregating host and cluster metrics.");
@@ -96,7 +99,7 @@ public class HBaseTimelineMetricStore extends AbstractService implements Timelin
         TimelineMetricAggregatorFactory.createTimelineClusterAggregatorSecond(hBaseAccessor, metricsConf, metricMetadataManager);
       scheduleAggregatorThread(secondClusterAggregator, metricsConf);
 
-//      // Start the minute cluster aggregator
+      // Start the minute cluster aggregator
       TimelineMetricAggregator minuteClusterAggregator =
         TimelineMetricAggregatorFactory.createTimelineClusterAggregatorMinute(hBaseAccessor, metricsConf);
       scheduleAggregatorThread(minuteClusterAggregator, metricsConf);

http://git-wip-us.apache.org/repos/asf/ambari/blob/6d3e2912/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
index 09da6bf..8cfe9a9 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/PhoenixHBaseAccessor.java
@@ -150,6 +150,15 @@ public class PhoenixHBaseAccessor {
   static final String BLOCKING_STORE_FILES_KEY =
     "hbase.hstore.blockingStoreFiles";
 
+  private final String precisionTtl;
+  private final String hostMinTtl;
+  private final String hostHourTtl;
+  private final String hostDailyTtl;
+  private final String clusterSecTtl;
+  private final String clusterMinTtl;
+  private final String clusterHourTtl;
+  private final String clusterDailyTtl;
+
   public PhoenixHBaseAccessor(Configuration hbaseConf,
                               Configuration metricsConf){
     this(hbaseConf, metricsConf, new DefaultPhoenixDataSource(hbaseConf));
@@ -173,6 +182,15 @@ public class PhoenixHBaseAccessor {
     this.outOfBandTimeAllowance = metricsConf.getLong(OUT_OFF_BAND_DATA_TIME_ALLOWANCE,
       DEFAULT_OUT_OF_BAND_TIME_ALLOWANCE);
     this.skipBlockCacheForAggregatorsEnabled = metricsConf.getBoolean(AGGREGATORS_SKIP_BLOCK_CACHE, false);
+
+    precisionTtl = getDaysInSeconds(metricsConf.get(PRECISION_TABLE_TTL, "1"));           //1 day
+    hostMinTtl = getDaysInSeconds(metricsConf.get(HOST_MINUTE_TABLE_TTL, "7"));           //7 days
+    hostHourTtl = getDaysInSeconds(metricsConf.get(HOST_HOUR_TABLE_TTL, "30"));           //30 days
+    hostDailyTtl = getDaysInSeconds(metricsConf.get(HOST_DAILY_TABLE_TTL, "365"));        //1 year
+    clusterSecTtl = getDaysInSeconds(metricsConf.get(CLUSTER_SECOND_TABLE_TTL, "7"));     //7 days
+    clusterMinTtl = getDaysInSeconds(metricsConf.get(CLUSTER_MINUTE_TABLE_TTL, "30"));    //30 days
+    clusterHourTtl = getDaysInSeconds(metricsConf.get(CLUSTER_HOUR_TABLE_TTL, "365"));    //1 year
+    clusterDailyTtl = getDaysInSeconds(metricsConf.get(CLUSTER_DAILY_TABLE_TTL, "730"));  //2 years
   }
 
   private static TimelineMetric getLastTimelineMetricFromResultSet(ResultSet rs)
@@ -236,20 +254,80 @@ public class PhoenixHBaseAccessor {
     return dataSource.getHBaseAdmin();
   }
 
+  /**
+   * Set TTL on tables based on user settings
+   */
+  protected void alterMetricTableTTL() {
+    Connection conn = null;
+    Statement stmt = null;
+
+    try {
+      LOG.info("Initializing metrics schema...");
+      conn = getConnectionRetryingOnException();
+      stmt = conn.createStatement();
+
+      //alter TTL options to update tables
+      stmt.executeUpdate(String.format(ALTER_SQL,
+        METRICS_RECORD_TABLE_NAME,
+        precisionTtl));
+      stmt.executeUpdate(String.format(ALTER_SQL,
+        METRICS_AGGREGATE_MINUTE_TABLE_NAME,
+        hostMinTtl));
+      stmt.executeUpdate(String.format(ALTER_SQL,
+        METRICS_AGGREGATE_HOURLY_TABLE_NAME,
+        hostHourTtl));
+      stmt.executeUpdate(String.format(ALTER_SQL,
+        METRICS_AGGREGATE_DAILY_TABLE_NAME,
+        hostDailyTtl));
+      stmt.executeUpdate(String.format(ALTER_SQL,
+        METRICS_CLUSTER_AGGREGATE_TABLE_NAME,
+        clusterSecTtl));
+      stmt.executeUpdate(String.format(ALTER_SQL,
+        METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME,
+        clusterMinTtl));
+      stmt.executeUpdate(String.format(ALTER_SQL,
+        METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME,
+        clusterHourTtl));
+      stmt.executeUpdate(String.format(ALTER_SQL,
+        METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME,
+        clusterDailyTtl));
+
+      conn.commit();
+
+
+    } catch (InterruptedException e) {
+      LOG.warn("Error updating TTL on tables.", e);
+    } catch (SQLException sql) {
+      if (sql.getErrorCode() == SQLExceptionCode.SET_UNSUPPORTED_PROP_ON_ALTER_TABLE.getErrorCode()) {
+        LOG.warn("Update TTL on tables is unsupported by the phoenix version. " + sql.getMessage());
+      } else {
+        LOG.warn("Error updating TTL on tables.", sql);
+      }
+    } finally {
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+      if (conn != null) {
+        try {
+          conn.close();
+        } catch (SQLException e) {
+          // Ignore
+        }
+      }
+    }
+  }
+
   protected void initMetricSchema() {
     Connection conn = null;
     Statement stmt = null;
 
     String encoding = metricsConf.get(HBASE_ENCODING_SCHEME, DEFAULT_ENCODING);
     String compression = metricsConf.get(HBASE_COMPRESSION_SCHEME, DEFAULT_TABLE_COMPRESSION);
-    String precisionTtl = getDaysInSeconds(metricsConf.get(PRECISION_TABLE_TTL, "1"));           //1 day
-    String hostMinTtl = getDaysInSeconds(metricsConf.get(HOST_MINUTE_TABLE_TTL, "7"));           //7 days
-    String hostHourTtl = getDaysInSeconds(metricsConf.get(HOST_HOUR_TABLE_TTL, "30"));           //30 days
-    String hostDailyTtl = getDaysInSeconds(metricsConf.get(HOST_DAILY_TABLE_TTL, "365"));        //1 year
-    String clusterSecTtl = getDaysInSeconds(metricsConf.get(CLUSTER_SECOND_TABLE_TTL, "7"));     //7 days
-    String clusterMinTtl = getDaysInSeconds(metricsConf.get(CLUSTER_MINUTE_TABLE_TTL, "30"));    //30 days
-    String clusterHourTtl = getDaysInSeconds(metricsConf.get(CLUSTER_HOUR_TABLE_TTL, "365"));    //1 year
-    String clusterDailyTtl = getDaysInSeconds(metricsConf.get(CLUSTER_DAILY_TABLE_TTL, "730"));  //2 years
+
 
     try {
       LOG.info("Initializing metrics schema...");
@@ -294,48 +372,14 @@ public class PhoenixHBaseAccessor {
       stmt.executeUpdate(String.format(CREATE_METRICS_CLUSTER_AGGREGATE_GROUPED_TABLE_SQL,
         METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME, encoding, clusterDailyTtl, compression));
 
-      //alter TTL options to update tables
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_RECORD_TABLE_NAME,
-        precisionTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_AGGREGATE_MINUTE_TABLE_NAME,
-        hostMinTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_AGGREGATE_HOURLY_TABLE_NAME,
-        hostHourTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_AGGREGATE_DAILY_TABLE_NAME,
-        hostDailyTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_CLUSTER_AGGREGATE_TABLE_NAME,
-        clusterSecTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_CLUSTER_AGGREGATE_MINUTE_TABLE_NAME,
-        clusterMinTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_CLUSTER_AGGREGATE_HOURLY_TABLE_NAME,
-        clusterHourTtl));
-      stmt.executeUpdate(String.format(ALTER_SQL,
-        METRICS_CLUSTER_AGGREGATE_DAILY_TABLE_NAME,
-        clusterDailyTtl));
 
       conn.commit();
 
       LOG.info("Metrics schema initialized.");
-    } catch (SQLException sql) {
-      if (sql.getErrorCode() ==
-        SQLExceptionCode.SET_UNSUPPORTED_PROP_ON_ALTER_TABLE.getErrorCode()) {
-        LOG.warn("Cannot update TTL on tables. " + sql.getMessage());
-      } else {
-        LOG.error("Error creating Metrics Schema in HBase using Phoenix.", sql);
-        throw new MetricsSystemInitializationException(
-          "Error creating Metrics Schema in HBase using Phoenix.", sql);
-      }
-    } catch (InterruptedException e) {
-      LOG.error("Error creating Metrics Schema in HBase using Phoenix.", e);
+    } catch (SQLException | InterruptedException sql) {
+      LOG.error("Error creating Metrics Schema in HBase using Phoenix.", sql);
       throw new MetricsSystemInitializationException(
-        "Error creating Metrics Schema in HBase using Phoenix.", e);
+        "Error creating Metrics Schema in HBase using Phoenix.", sql);
     } finally {
       if (stmt != null) {
         try {
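
The TTL fields consumed above are day counts converted to seconds before being
interpolated into ALTER_SQL. A Python re-statement of that conversion; the
ALTER statement shape is an assumption based on Phoenix's ALTER TABLE ... SET
syntax, and METRIC_RECORD is used as an illustrative table name:

    ALTER_SQL = "ALTER TABLE %s SET TTL=%s"    # assumed shape of the Java constant

    def get_days_in_seconds(days):
      # mirrors the Java getDaysInSeconds helper for whole-day settings
      return str(int(days) * 86400)

    precision_ttl = get_days_in_seconds("1")   # "86400": precision data kept 1 day
    print ALTER_SQL % ("METRIC_RECORD", precision_ttl)
    # ALTER TABLE METRIC_RECORD SET TTL=86400

Moving these ALTER statements out of initMetricSchema() and into the new
alterMetricTableTTL(), which runs only after initPolicies(), is the ordering
fix this commit describes.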


[38/51] [abbrv] ambari git commit: AMBARI-15355. After EU from 2.2 -> 2.3 HBase region server failed to start with a memory error (dlysnichenko)

Posted by jl...@apache.org.
AMBARI-15355. After EU from 2.2 -> 2.3 HBase region server failed to start with a memory error (dlysnichenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/28430f37
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/28430f37
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/28430f37

Branch: refs/heads/AMBARI-13364
Commit: 28430f37cdae8d84eddaf2cce9f6d99a41ed4922
Parents: 456b451
Author: Lisnichenko Dmitro <dl...@hortonworks.com>
Authored: Wed Mar 9 20:49:57 2016 +0200
Committer: Lisnichenko Dmitro <dl...@hortonworks.com>
Committed: Wed Mar 9 20:50:23 2016 +0200

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.2/services/stack_advisor.py  | 4 ++--
 .../src/test/python/stacks/2.3/common/test_stack_advisor.py      | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/28430f37/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
index 5baba42..31acba2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/stack_advisor.py
@@ -609,9 +609,9 @@ class HDP22StackAdvisor(HDP21StackAdvisor):
       hfile_block_cache_size = '0.4'
       block_cache_heap = 8192 # int(regionserver_heap_size * hfile_block_cache_size)
       hbase_regionserver_global_memstore_size = '0.4'
-      reserved_offheap_memory = 2048
+      reserved_offheap_memory = regionserver_max_direct_memory_size / 2
       bucketcache_offheap_memory = regionserver_max_direct_memory_size - reserved_offheap_memory
-      hbase_bucketcache_size = block_cache_heap + bucketcache_offheap_memory
+      hbase_bucketcache_size = min(block_cache_heap + bucketcache_offheap_memory, regionserver_max_direct_memory_size - 1024)
       hbase_bucketcache_percentage_in_combinedcache = float(bucketcache_offheap_memory) / hbase_bucketcache_size
       hbase_bucketcache_percentage_in_combinedcache_str = "{0:.4f}".format(math.ceil(hbase_bucketcache_percentage_in_combinedcache * 10000) / 10000.0)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/28430f37/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
index 1a3fa65..d6d4518 100644
--- a/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
+++ b/ambari-server/src/test/python/stacks/2.3/common/test_stack_advisor.py
@@ -834,7 +834,7 @@ class TestHDP23StackAdvisor(TestCase):
       "hbase-site": {
         "properties": {
           "hbase.bucketcache.size": "92160",
-          "hbase.bucketcache.percentage.in.combinedcache": "0.9184",
+          "hbase.bucketcache.percentage.in.combinedcache": "0.8519",
           "hbase.regionserver.global.memstore.size": "0.4",
           "hfile.block.cache.size": "0.4",
           "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint",


[24/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7221e5a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7221e5a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7221e5a

Branch: refs/heads/AMBARI-13364
Commit: f7221e5a601abb74977389f453754432f4008616
Parents: 2efe894
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Tue Mar 8 15:33:30 2016 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Tue Mar 8 15:33:30 2016 -0800

----------------------------------------------------------------------
 .../ambari_agent/HostCheckReportFileHandler.py  |   4 +-
 .../libraries/functions/__init__.py             |   4 +-
 .../libraries/functions/conf_select.py          |  26 +-
 .../libraries/functions/copy_tarball.py         |  22 +-
 .../dynamic_variable_interpretation.py          |  32 +-
 .../libraries/functions/get_hdp_version.py      |  91 ------
 .../libraries/functions/get_lzo_packages.py     |   6 +-
 .../libraries/functions/get_stack_version.py    |  91 ++++++
 .../libraries/functions/hdp_select.py           | 307 -------------------
 .../libraries/functions/install_hdp_msi.py      | 215 -------------
 .../libraries/functions/install_windows_msi.py  | 215 +++++++++++++
 .../libraries/functions/setup_ranger_plugin.py  |   8 +-
 .../functions/setup_ranger_plugin_xml.py        |  28 +-
 .../libraries/functions/stack_select.py         | 307 +++++++++++++++++++
 .../libraries/functions/version.py              |   8 +-
 .../libraries/functions/version_select_util.py  |  10 +-
 .../libraries/script/script.py                  |  50 +--
 .../package/scripts/accumulo_client.py          |   6 +-
 .../package/scripts/accumulo_script.py          |  22 +-
 .../1.6.1.2.2.0/package/scripts/params.py       |  12 +-
 .../0.1.0.2.3/package/scripts/atlas_client.py   |   6 +-
 .../package/scripts/metadata_server.py          |   8 +-
 .../ATLAS/0.1.0.2.3/package/scripts/params.py   |   4 +-
 .../0.5.0.2.1/package/scripts/falcon_client.py  |   6 +-
 .../0.5.0.2.1/package/scripts/falcon_server.py  |   6 +-
 .../0.5.0.2.1/package/scripts/params_linux.py   |  12 +-
 .../0.5.0.2.1/package/scripts/status_params.py  |   2 +-
 .../1.4.0.2.0/package/scripts/flume_handler.py  |   6 +-
 .../FLUME/1.4.0.2.0/package/scripts/params.py   |   6 +-
 .../1.4.0.2.0/package/scripts/params_linux.py   |   2 +-
 .../HAWQ/2.0.0/package/scripts/hawqmaster.py    |   2 +-
 .../0.96.0.2.0/package/scripts/hbase_client.py  |  10 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |  10 +-
 .../package/scripts/phoenix_queryserver.py      |   6 +-
 .../0.96.0.2.0/package/scripts/status_params.py |   2 +-
 .../HBASE/0.96.0.2.0/package/scripts/upgrade.py |   8 +-
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py  |   8 +-
 .../2.1.0.2.0/package/scripts/hdfs_client.py    |   6 +-
 .../2.1.0.2.0/package/scripts/journalnode.py    |   8 +-
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py  |   8 +-
 .../2.1.0.2.0/package/scripts/nfsgateway.py     |   8 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  20 +-
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py |   8 +-
 .../HDFS/2.1.0.2.0/package/scripts/utils.py     |   6 +-
 .../0.12.0.2.0/package/scripts/hcat_client.py   |   4 +-
 .../HIVE/0.12.0.2.0/package/scripts/hive.py     |   4 +-
 .../0.12.0.2.0/package/scripts/hive_client.py   |   6 +-
 .../package/scripts/hive_metastore.py           |  12 +-
 .../0.12.0.2.0/package/scripts/hive_server.py   |  10 +-
 .../package/scripts/hive_server_interactive.py  |   6 +-
 .../package/scripts/hive_server_upgrade.py      |  16 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |  16 +-
 .../package/scripts/params_windows.py           |   2 +-
 .../0.12.0.2.0/package/scripts/status_params.py |   8 +-
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py  |   2 +-
 .../package/scripts/webhcat_server.py           |   6 +-
 .../KAFKA/0.8.1.2.2/package/scripts/kafka.py    |   4 +-
 .../0.8.1.2.2/package/scripts/kafka_broker.py   |  18 +-
 .../KAFKA/0.8.1.2.2/package/scripts/params.py   |  16 +-
 .../KNOX/0.5.0.2.2/package/scripts/knox.py      |   4 +-
 .../0.5.0.2.2/package/scripts/knox_gateway.py   |  10 +-
 .../0.5.0.2.2/package/scripts/params_linux.py   |  18 +-
 .../0.5.0.2.2/package/scripts/status_params.py  |   2 +-
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py   |   4 +-
 .../1.0.0.2.3/package/scripts/mahout_client.py  |   4 +-
 .../MAHOUT/1.0.0.2.3/package/scripts/params.py  |  10 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   4 +-
 .../4.0.0.2.0/package/scripts/oozie_client.py   |   6 +-
 .../4.0.0.2.0/package/scripts/oozie_server.py   |  18 +-
 .../package/scripts/oozie_server_upgrade.py     |  12 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |  18 +-
 .../4.0.0.2.0/package/scripts/status_params.py  |   2 +-
 .../0.12.0.2.0/package/scripts/params_linux.py  |  12 +-
 .../0.12.0.2.0/package/scripts/pig_client.py    |   6 +-
 .../0.12.0.2.0/package/scripts/service_check.py |   2 +-
 .../RANGER/0.4.0/package/scripts/params.py      |  10 +-
 .../0.4.0/package/scripts/ranger_admin.py       |   6 +-
 .../RANGER/0.4.0/package/scripts/upgrade.py     |   4 +-
 .../0.5.0.2.3/package/scripts/params.py         |   6 +-
 .../0.5.0.2.3/package/scripts/upgrade.py        |   4 +-
 .../SLIDER/0.60.0.2.2/package/scripts/params.py |   4 +-
 .../0.60.0.2.2/package/scripts/params_linux.py  |   6 +-
 .../0.60.0.2.2/package/scripts/service_check.py |   2 +-
 .../SLIDER/0.60.0.2.2/package/scripts/slider.py |   2 +-
 .../0.60.0.2.2/package/scripts/slider_client.py |   8 +-
 .../package/scripts/job_history_server.py       |  10 +-
 .../SPARK/1.2.0.2.2/package/scripts/params.py   |  18 +-
 .../1.2.0.2.2/package/scripts/setup_spark.py    |   6 +-
 .../1.2.0.2.2/package/scripts/spark_client.py   |   8 +-
 .../1.2.0.2.2/package/scripts/spark_service.py  |   8 +-
 .../package/scripts/spark_thrift_server.py      |   8 +-
 .../1.4.4.2.0/package/scripts/params_linux.py   |   6 +-
 .../1.4.4.2.0/package/scripts/sqoop_client.py   |   8 +-
 .../0.9.1.2.1/package/scripts/drpc_server.py    |   8 +-
 .../STORM/0.9.1.2.1/package/scripts/nimbus.py   |  10 +-
 .../0.9.1.2.1/package/scripts/nimbus_prod.py    |  10 +-
 .../0.9.1.2.1/package/scripts/params_linux.py   |  10 +-
 .../0.9.1.2.1/package/scripts/params_windows.py |   2 +-
 .../STORM/0.9.1.2.1/package/scripts/rest_api.py |   4 +-
 .../0.9.1.2.1/package/scripts/status_params.py  |   2 +-
 .../STORM/0.9.1.2.1/package/scripts/storm.py    |   2 +-
 .../0.9.1.2.1/package/scripts/supervisor.py     |  10 +-
 .../package/scripts/supervisor_prod.py          |  10 +-
 .../0.9.1.2.1/package/scripts/ui_server.py      |   8 +-
 .../0.4.0.2.1/package/scripts/params_linux.py   |  12 +-
 .../0.4.0.2.1/package/scripts/params_windows.py |   8 +-
 .../0.4.0.2.1/package/scripts/pre_upgrade.py    |   4 +-
 .../0.4.0.2.1/package/scripts/service_check.py  |   2 +-
 .../TEZ/0.4.0.2.1/package/scripts/tez_client.py |  10 +-
 .../scripts/application_timeline_server.py      |   8 +-
 .../2.1.0.2.0/package/scripts/historyserver.py  |  10 +-
 .../package/scripts/mapreduce2_client.py        |   6 +-
 .../2.1.0.2.0/package/scripts/nodemanager.py    |   8 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |  16 +-
 .../package/scripts/resourcemanager.py          |   8 +-
 .../2.1.0.2.0/package/scripts/service_check.py  |   2 +-
 .../YARN/2.1.0.2.0/package/scripts/yarn.py      |   2 +-
 .../2.1.0.2.0/package/scripts/yarn_client.py    |   6 +-
 .../3.4.5.2.0/package/scripts/params_linux.py   |   6 +-
 .../3.4.5.2.0/package/scripts/status_params.py  |   2 +-
 .../3.4.5.2.0/package/scripts/zookeeper.py      |   8 +-
 .../package/scripts/zookeeper_client.py         |   8 +-
 .../package/scripts/zookeeper_server.py         |   8 +-
 .../package/scripts/zookeeper_service.py        |   8 +-
 .../custom_actions/scripts/install_packages.py  |  18 +-
 .../custom_actions/scripts/ru_set_all.py        |  18 +-
 .../main/resources/scripts/Ambaripreupload.py   |  56 ++--
 .../0.8/services/HIVE/package/scripts/params.py |   6 +-
 .../2.0.6/hooks/after-INSTALL/scripts/params.py |  10 +-
 .../scripts/shared_initialization.py            |   8 +-
 .../2.0.6/hooks/before-ANY/scripts/params.py    |  14 +-
 .../before-ANY/scripts/shared_initialization.py |   4 +-
 .../hooks/before-INSTALL/scripts/params.py      |   4 +-
 .../scripts/shared_initialization.py            |   2 +-
 .../2.0.6/hooks/before-START/scripts/params.py  |  20 +-
 .../services/ECS/package/scripts/params.py      |   8 +-
 ambari-server/src/test/python/TestVersion.py    |   4 +-
 .../custom_actions/TestInstallPackages.py       |  20 +-
 .../AMBARI_METRICS/test_metrics_collector.py    |   4 +-
 .../AMBARI_METRICS/test_metrics_grafana.py      |   2 +-
 .../python/stacks/2.0.6/FLUME/test_flume.py     |  28 +-
 .../stacks/2.0.6/FLUME/test_service_check.py    |   2 +-
 .../2.0.6/GANGLIA/test_ganglia_monitor.py       |  10 +-
 .../stacks/2.0.6/GANGLIA/test_ganglia_server.py |   8 +-
 .../stacks/2.0.6/HBASE/test_hbase_client.py     |   8 +-
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  40 +--
 .../2.0.6/HBASE/test_hbase_regionserver.py      |  32 +-
 .../2.0.6/HBASE/test_hbase_service_check.py     |   6 +-
 .../2.0.6/HBASE/test_phoenix_queryserver.py     |  16 +-
 .../python/stacks/2.0.6/HDFS/test_datanode.py   |  44 +--
 .../stacks/2.0.6/HDFS/test_hdfs_client.py       |  18 +-
 .../stacks/2.0.6/HDFS/test_journalnode.py       |  32 +-
 .../python/stacks/2.0.6/HDFS/test_namenode.py   |  84 ++---
 .../python/stacks/2.0.6/HDFS/test_nfsgateway.py |  24 +-
 .../stacks/2.0.6/HDFS/test_service_check.py     |   4 +-
 .../python/stacks/2.0.6/HDFS/test_snamenode.py  |  22 +-
 .../test/python/stacks/2.0.6/HDFS/test_zkfc.py  |  22 +-
 .../stacks/2.0.6/HIVE/test_hcat_client.py       |   6 +-
 .../stacks/2.0.6/HIVE/test_hive_client.py       |   8 +-
 .../stacks/2.0.6/HIVE/test_hive_metastore.py    |  20 +-
 .../stacks/2.0.6/HIVE/test_hive_server.py       |  72 ++---
 .../2.0.6/HIVE/test_hive_service_check.py       |   6 +-
 .../stacks/2.0.6/HIVE/test_mysql_server.py      |  16 +-
 .../stacks/2.0.6/HIVE/test_webhcat_server.py    |  28 +-
 .../stacks/2.0.6/OOZIE/test_oozie_client.py     |  10 +-
 .../stacks/2.0.6/OOZIE/test_oozie_server.py     |  40 +--
 .../2.0.6/OOZIE/test_oozie_service_check.py     |   2 +-
 .../stacks/2.0.6/OOZIE/test_service_check.py    |   4 +-
 .../python/stacks/2.0.6/PIG/test_pig_client.py  |  10 +-
 .../stacks/2.0.6/PIG/test_pig_service_check.py  |   4 +-
 .../stacks/2.0.6/SQOOP/test_service_check.py    |   4 +-
 .../python/stacks/2.0.6/SQOOP/test_sqoop.py     |   6 +-
 .../stacks/2.0.6/YARN/test_historyserver.py     |  28 +-
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py |  12 +-
 .../2.0.6/YARN/test_mapreduce2_service_check.py |   4 +-
 .../stacks/2.0.6/YARN/test_nodemanager.py       |  34 +-
 .../stacks/2.0.6/YARN/test_resourcemanager.py   |  32 +-
 .../stacks/2.0.6/YARN/test_yarn_client.py       |  14 +-
 .../2.0.6/YARN/test_yarn_service_check.py       |   4 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_client.py    |   8 +-
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py    |  28 +-
 .../ZOOKEEPER/test_zookeeper_service_check.py   |   6 +-
 .../hooks/after-INSTALL/test_after_install.py   |   2 +-
 .../stacks/2.1/FALCON/test_falcon_client.py     |  10 +-
 .../stacks/2.1/FALCON/test_falcon_server.py     |  22 +-
 .../stacks/2.1/FALCON/test_service_check.py     |   4 +-
 .../stacks/2.1/HIVE/test_hive_metastore.py      |  58 ++--
 .../stacks/2.1/STORM/test_service_check.py      |   2 +-
 .../stacks/2.1/STORM/test_storm_drpc_server.py  |  26 +-
 .../2.1/STORM/test_storm_jaas_configuration.py  |   8 +-
 .../stacks/2.1/STORM/test_storm_nimbus.py       |  28 +-
 .../stacks/2.1/STORM/test_storm_nimbus_prod.py  |  16 +-
 .../2.1/STORM/test_storm_rest_api_service.py    |  12 +-
 .../stacks/2.1/STORM/test_storm_supervisor.py   |  16 +-
 .../2.1/STORM/test_storm_supervisor_prod.py     |  16 +-
 .../stacks/2.1/STORM/test_storm_ui_server.py    |  24 +-
 .../python/stacks/2.1/TEZ/test_service_check.py |   4 +-
 .../python/stacks/2.1/TEZ/test_tez_client.py    |  20 +-
 .../stacks/2.1/YARN/test_apptimelineserver.py   |  22 +-
 .../stacks/2.2/ACCUMULO/test_accumulo_client.py |   4 +-
 .../stacks/2.2/KAFKA/test_kafka_broker.py       |   8 +-
 .../stacks/2.2/KERBEROS/test_kerberos_client.py |  14 +-
 .../stacks/2.2/KERBEROS/test_kerberos_server.py |   8 +-
 .../python/stacks/2.2/KNOX/test_knox_gateway.py |  22 +-
 .../stacks/2.2/PIG/test_pig_service_check.py    |   2 +-
 .../stacks/2.2/RANGER/test_ranger_admin.py      |  14 +-
 .../stacks/2.2/RANGER/test_ranger_usersync.py   |  16 +-
 .../stacks/2.2/SLIDER/test_slider_client.py     |  10 +-
 .../stacks/2.2/SPARK/test_job_history_server.py |  16 +-
 .../stacks/2.2/SPARK/test_spark_client.py       |   8 +-
 .../2.2/SPARK/test_spark_service_check.py       |   6 +-
 .../stacks/2.3/ATLAS/test_metadata_server.py    |   8 +-
 .../python/stacks/2.3/HAWQ/test_hawqmaster.py   |  10 +-
 .../python/stacks/2.3/HAWQ/test_hawqsegment.py  |   8 +-
 .../python/stacks/2.3/HAWQ/test_hawqstandby.py  |   8 +-
 .../stacks/2.3/MAHOUT/test_mahout_client.py     |   6 +-
 .../2.3/MAHOUT/test_mahout_service_check.py     |   2 +-
 .../src/test/python/stacks/2.3/PXF/test_pxf.py  |  10 +-
 .../2.3/SPARK/test_spark_thrift_server.py       |  10 +-
 .../stacks/2.3/STORM/test_service_check.py      |   2 +-
 .../stacks/2.3/STORM/test_storm_upgrade.py      |   4 +-
 .../test/python/stacks/2.3/YARN/test_ats_1_5.py |   6 +-
 .../src/test/python/stacks/utils/RMFTestCase.py |   4 +-
 223 files changed, 1868 insertions(+), 1868 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
index 1f87a73..ee7db0a 100644
--- a/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/HostCheckReportFileHandler.py
@@ -87,7 +87,7 @@ class HostCheckReportFileHandler:
       logger.error("Can't write host check file at %s :%s " % (self.hostCheckCustomActionsFilePath, err.message))
       traceback.print_exc()
 
-  def _hdp_list_directory(self):
+  def _stack_list_directory(self):
     """
     Return a filtered list of /usr/hdp directories allowed to be removed
     :rtype list
@@ -152,7 +152,7 @@ class HostCheckReportFileHandler:
         items = []
         for itemDetail in hostInfo['stackFoldersAndFiles']:
           items.append(itemDetail['name'])
-        items += self._hdp_list_directory()
+        items += self._stack_list_directory()
         config.add_section('directories')
         config.set('directories', 'dir_list', ','.join(items))
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py b/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
index 1ab0ff1..e886fe4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/__init__.py
@@ -38,7 +38,7 @@ from resource_management.libraries.functions.hive_check import *
 from resource_management.libraries.functions.version import *
 from resource_management.libraries.functions.format_jvm_option import *
 from resource_management.libraries.functions.constants import *
-from resource_management.libraries.functions.get_hdp_version import *
+from resource_management.libraries.functions.get_stack_version import *
 from resource_management.libraries.functions.get_lzo_packages import *
 from resource_management.libraries.functions.setup_ranger_plugin import *
 from resource_management.libraries.functions.curl_krb_request import *
@@ -47,6 +47,6 @@ IS_WINDOWS = platform.system() == "Windows"
 
 if IS_WINDOWS:
   from resource_management.libraries.functions.windows_service_utils import *
-  from resource_management.libraries.functions.install_hdp_msi import *
+  from resource_management.libraries.functions.install_windows_msi import *
   from resource_management.libraries.functions.install_jdbc_driver import *
   from resource_management.libraries.functions.reload_windows_env import *

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
index dc7fa6e..59c717b 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py
@@ -22,7 +22,7 @@ __all__ = ["select", "create", "get_hadoop_conf_dir", "get_hadoop_dir"]
 
 import os
 import version
-import hdp_select
+import stack_select
 import subprocess
 
 from resource_management.core import shell
@@ -34,7 +34,7 @@ from resource_management.core.resources.system import Execute
 from resource_management.core.resources.system import Link
 from resource_management.libraries.functions.default import default
 from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.core.shell import as_sudo
 
 
@@ -192,7 +192,7 @@ def _valid(stack_name, package, ver):
   if stack_name != "HDP":
     return False
 
-  if version.compare_versions(version.format_hdp_stack_version(ver), "2.3.0.0") < 0:
+  if version.compare_versions(version.format_stack_version(ver), "2.3.0.0") < 0:
     return False
 
   return True
@@ -298,10 +298,10 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
 
   if not Script.in_stack_upgrade():
     # During normal operation, the HDP stack must be 2.2 or higher
-    if Script.is_hdp_stack_greater_or_equal("2.2"):
+    if Script.is_stack_greater_or_equal("2.2"):
       hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
 
-    if Script.is_hdp_stack_greater_or_equal("2.3"):
+    if Script.is_stack_greater_or_equal("2.3"):
       hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
       stack_name = default("/hostLevelParams/stack_name", None)
       version = default("/commandParams/version", None)
@@ -326,16 +326,16 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
     EU/RU | 2.3    | 2.3.*  | Any                   | Use /usr/hdp/$version/hadoop/conf, which should be a symlink destination
     '''
 
-    # The method "is_hdp_stack_greater_or_equal" uses "stack_version" which is the desired stack, e.g., 2.2 or 2.3
+    # The method "is_stack_greater_or_equal" uses "stack_version" which is the desired stack, e.g., 2.2 or 2.3
     # In an RU, it is always the desired stack, and doesn't change even during the Downgrade!
     # In an RU Downgrade from HDP 2.3 to 2.2, the first thing we do is 
     # rm /etc/[component]/conf and then mv /etc/[component]/conf.backup /etc/[component]/conf
-    if Script.is_hdp_stack_greater_or_equal("2.2"):
+    if Script.is_stack_greater_or_equal("2.2"):
       hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
 
       # This contains the "version", including the build number, that is actually used during a stack upgrade and
       # is the version upgrading/downgrading to.
-      stack_info = hdp_select._get_upgrade_stack()
+      stack_info = stack_select._get_upgrade_stack()
 
       if stack_info is not None:
         stack_name = stack_info[0]
@@ -345,14 +345,14 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
       
       Logger.info("In the middle of a stack upgrade/downgrade for Stack {0} and destination version {1}, determining which hadoop conf dir to use.".format(stack_name, version))
       # This is the version either upgrading or downgrading to.
-      if compare_versions(format_hdp_stack_version(version), "2.3.0.0") >= 0:
+      if compare_versions(format_stack_version(version), "2.3.0.0") >= 0:
         # Determine if hdp-select has been run and if not, then use the current
         # hdp version until this component is upgraded.
         if not force_latest_on_upgrade:
-          current_hdp_version = hdp_select.get_role_component_current_hdp_version()
-          if current_hdp_version is not None and version != current_hdp_version:
-            version = current_hdp_version
-            Logger.info("hdp-select has not yet been called to update the symlink for this component, keep using version {0}".format(current_hdp_version))
+          current_stack_version = stack_select.get_role_component_current_stack_version()
+          if current_stack_version is not None and version != current_stack_version:
+            version = current_stack_version
+            Logger.info("hdp-select has not yet been called to update the symlink for this component, keep using version {0}".format(current_stack_version))
 
         # Only change the hadoop_conf_dir path, don't conf-select this older version
         hadoop_conf_dir = "/usr/hdp/{0}/hadoop/conf".format(version)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
index b4c8bc8..647b8b6 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py
@@ -61,7 +61,7 @@ TARBALL_MAP = {
 }
 
 
-def _get_single_version_from_hdp_select():
+def _get_single_version_from_stack_select():
   """
   Call "hdp-select versions" and return the version string if only one version is available.
   :return: Returns a version string if successful, and None otherwise.
@@ -70,12 +70,12 @@ def _get_single_version_from_hdp_select():
   tmpfile = tempfile.NamedTemporaryFile()
   tmp_dir = Script.get_tmp_dir()
   tmp_file = os.path.join(tmp_dir, "copy_tarball_out.txt")
-  hdp_version = None
+  stack_version = None
 
   out = None
-  get_hdp_versions_cmd = "/usr/bin/hdp-select versions > {0}".format(tmp_file)
+  get_stack_versions_cmd = "/usr/bin/hdp-select versions > {0}".format(tmp_file)
   try:
-    code, stdoutdata = shell.call(get_hdp_versions_cmd, logoutput=True)
+    code, stdoutdata = shell.call(get_stack_versions_cmd, logoutput=True)
     with open(tmp_file, 'r+') as file:
       out = file.read()
   except Exception, e:
@@ -88,17 +88,17 @@ def _get_single_version_from_hdp_select():
       Logger.logger.exception("Could not remove file {0}. Error: {1}".format(str(tmp_file), str(e)))
 
   if code != 0 or out is None or out == "":
-    Logger.error("Could not verify HDP version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_hdp_versions_cmd, str(code), str(out)))
+    Logger.error("Could not verify HDP version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_stack_versions_cmd, str(code), str(out)))
     return None
 
   matches = re.findall(r"([\d\.]+\-\d+)", out)
 
   if matches and len(matches) == 1:
-    hdp_version = matches[0]
+    stack_version = matches[0]
   elif matches and len(matches) > 1:
     Logger.error("Found multiple matches for HDP version, cannot identify the correct one from: {0}".format(", ".join(matches)))
 
-  return hdp_version
+  return stack_version
 
 def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=None, custom_dest_file=None, force_execute=False,
                  use_upgrading_version_during_uprade=True, replace_existing_files=False, host_sys_prepped=False):
@@ -152,10 +152,10 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
     if current_version is None:
       # During normal operation, the first installation of services won't yet know about the version, so must rely
       # on hdp-select to get it.
-      hdp_version = _get_single_version_from_hdp_select()
-      if hdp_version:
-        Logger.info("Will use stack version {0}".format(hdp_version))
-        current_version = hdp_version
+      stack_version = _get_single_version_from_stack_select()
+      if stack_version:
+        Logger.info("Will use stack version {0}".format(stack_version))
+        current_version = stack_version
 
   if current_version is None:
     message_suffix = "during rolling %s" % str(upgrade_direction) if is_stack_upgrade else ""

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
index 31f0c4a..a20b03c 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/dynamic_variable_interpretation.py
@@ -35,10 +35,10 @@ from resource_management.core import shell
 
 """
 This file provides helper methods needed for the versioning of RPMs. Specifically, it does dynamic variable
-interpretation to replace strings like {{ hdp_stack_version }}  where the value of the
+interpretation to replace strings like {{ stack_version_formatted }}  where the value of the
 variables cannot be determined ahead of time, but rather, depends on what files are found.
 
-It assumes that {{ hdp_stack_version }} is constructed as ${major.minor.patch.rev}-${build_number}
+It assumes that {{ stack_version_formatted }} is constructed as ${major.minor.patch.rev}-${build_number}
 E.g., 998.2.2.1.0-998
 Please note that "-${build_number}" is optional.
 """
@@ -54,10 +54,10 @@ def _get_tar_source_and_dest_folder(tarball_prefix):
   :return: Returns a tuple of (x, y) after verifying the properties
   """
   component_tar_source_file = default("/configurations/cluster-env/%s%s" % (tarball_prefix.lower(), TAR_SOURCE_SUFFIX), None)
-  # E.g., /usr/hdp/current/hadoop-client/tez-{{ hdp_stack_version }}.tar.gz
+  # E.g., /usr/hdp/current/hadoop-client/tez-{{ stack_version_formatted }}.tar.gz
 
   component_tar_destination_folder = default("/configurations/cluster-env/%s%s" % (tarball_prefix.lower(), TAR_DESTINATION_FOLDER_SUFFIX), None)
-  # E.g., hdfs:///hdp/apps/{{ hdp_stack_version }}/mapreduce/
+  # E.g., hdfs:///hdp/apps/{{ stack_version_formatted }}/mapreduce/
 
   if not component_tar_source_file or not component_tar_destination_folder:
     Logger.warning("Did not find %s tar source file and destination folder properties in cluster-env.xml" %
@@ -137,10 +137,10 @@ def _copy_files(source_and_dest_pairs, component_user, file_owner, group_owner,
   return return_value
 
 
-def copy_tarballs_to_hdfs(tarball_prefix, hdp_select_component_name, component_user, file_owner, group_owner, ignore_sysprep=False):
+def copy_tarballs_to_hdfs(tarball_prefix, stack_select_component_name, component_user, file_owner, group_owner, ignore_sysprep=False):
   """
   :param tarball_prefix: Prefix of the tarball must be one of tez, hive, mr, pig
-  :param hdp_select_component_name: Component name to get the status to determine the version
+  :param stack_select_component_name: Component name to get the status to determine the version
   :param component_user: User that will execute the Hadoop commands, usually smokeuser
   :param file_owner: Owner of the files copied to HDFS (typically hdfs user)
   :param group_owner: Group owner of the files copied to HDFS (typically hadoop group)
@@ -148,17 +148,17 @@ def copy_tarballs_to_hdfs(tarball_prefix, hdp_select_component_name, component_u
   :return: Returns 0 on success, 1 if no files were copied, and in some cases may raise an exception.
 
   In order to call this function, params.py must have all of the following,
-  hdp_stack_version, kinit_path_local, security_enabled, hdfs_user, hdfs_principal_name, hdfs_user_keytab,
+  stack_version_formatted, kinit_path_local, security_enabled, hdfs_user, hdfs_principal_name, hdfs_user_keytab,
   hadoop_bin_dir, hadoop_conf_dir, and HdfsDirectory as a partial function.
   """
   import params
 
   if not ignore_sysprep and hasattr(params, "host_sys_prepped") and params.host_sys_prepped:
-    Logger.info("Host is sys-prepped. Tarball %s will not be copied for %s." % (tarball_prefix, hdp_select_component_name))
+    Logger.info("Host is sys-prepped. Tarball %s will not be copied for %s." % (tarball_prefix, stack_select_component_name))
     return 0
 
-  if not hasattr(params, "hdp_stack_version") or params.hdp_stack_version is None:
-    Logger.warning("Could not find hdp_stack_version")
+  if not hasattr(params, "stack_version_formatted") or params.stack_version_formatted is None:
+    Logger.warning("Could not find stack_version_formatted")
     return 1
 
   component_tar_source_file, component_tar_destination_folder = _get_tar_source_and_dest_folder(tarball_prefix)
@@ -174,25 +174,25 @@ def copy_tarballs_to_hdfs(tarball_prefix, hdp_select_component_name, component_u
   tmpfile = tempfile.NamedTemporaryFile()
   out = None
   with open(tmpfile.name, 'r+') as file:
-    get_hdp_version_cmd = '/usr/bin/hdp-select status %s > %s' % (hdp_select_component_name, tmpfile.name)
-    code, stdoutdata = shell.call(get_hdp_version_cmd)
+    get_stack_version_cmd = '/usr/bin/hdp-select status %s > %s' % (stack_select_component_name, tmpfile.name)
+    code, stdoutdata = shell.call(get_stack_version_cmd)
     out = file.read()
   pass
   if code != 0 or out is None:
     Logger.warning("Could not verify HDP version by calling '%s'. Return Code: %s, Output: %s." %
-                   (get_hdp_version_cmd, str(code), str(out)))
+                   (get_stack_version_cmd, str(code), str(out)))
     return 1
 
   matches = re.findall(r"([\d\.]+\-\d+)", out)
-  hdp_version = matches[0] if matches and len(matches) > 0 else None
+  stack_version = matches[0] if matches and len(matches) > 0 else None
 
-  if not hdp_version:
+  if not stack_version:
     Logger.error("Could not parse HDP version from output of hdp-select: %s" % str(out))
     return 1
 
   file_name = os.path.basename(component_tar_source_file)
   destination_file = os.path.join(component_tar_destination_folder, file_name)
-  destination_file = destination_file.replace("{{ hdp_stack_version }}", hdp_version)
+  destination_file = destination_file.replace("{{ stack_version_formatted }}", stack_version)
 
   does_hdfs_file_exist_cmd = "fs -ls %s" % destination_file
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
deleted file mode 100644
index a56d33a..0000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_hdp_version.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-__all__ = ["get_hdp_version"]
-
-import os
-import re
-
-from ambari_commons import OSConst
-from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core import shell
-
-HDP_SELECT_BINARY = "/usr/bin/hdp-select"
-
-@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
-def get_hdp_version(package_name):
-  """
-  @param package_name, name of the package, from which, function will try to get hdp version
-  """
-  try:
-    component_home_dir = os.environ[package_name.upper() + "_HOME"]
-  except KeyError:
-    Logger.info('Skipping get_hdp_version since the component {0} is not yet available'.format(package_name))
-    return None # lazy fail
-
-  #As a rule, component_home_dir is of the form <hdp_root_dir>\[\]<component_versioned_subdir>[\]
-  home_dir_split = os.path.split(component_home_dir)
-  iSubdir = len(home_dir_split) - 1
-  while not home_dir_split[iSubdir]:
-    iSubdir -= 1
-
-  #The component subdir is expected to be of the form <package_name>-<package_version>.<hdp_stack_version>
-  # with package_version = #.#.# and hdp_stack_version=#.#.#.#-<build_number>
-  match = re.findall('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', home_dir_split[iSubdir])
-  if not match:
-    Logger.info('Failed to get extracted version for component {0}. Home dir not in expected format.'.format(package_name))
-    return None # lazy fail
-
-  return match[0]
-
-@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
-def get_hdp_version(package_name):
-  """
-  @param package_name, name of the package, from which, function will try to get hdp version
-  """
-  
-  if not os.path.exists(HDP_SELECT_BINARY):
-    Logger.info('Skipping get_hdp_version since hdp-select is not yet available')
-    return None # lazy fail
-  
-  try:
-    command = 'ambari-python-wrap {HDP_SELECT_BINARY} status {package_name}'.format(HDP_SELECT_BINARY=HDP_SELECT_BINARY, package_name=package_name)
-    return_code, hdp_output = shell.call(command, timeout=20)
-  except Exception, e:
-    Logger.error(str(e))
-    raise Fail('Unable to execute hdp-select command to retrieve the version.')
-
-  if return_code != 0:
-    raise Fail(
-      'Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
-
-  hdp_version = re.sub(package_name + ' - ', '', hdp_output)
-  hdp_version = hdp_version.rstrip()
-  match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
-
-  if match is None:
-    Logger.info('Failed to get extracted version with hdp-select')
-    return None # lazy fail
-
-  return hdp_version

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
index afb4314..870bb0c 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_lzo_packages.py
@@ -22,7 +22,7 @@ Ambari Agent
 __all__ = ["get_lzo_packages"]
 
 from ambari_commons.os_check import OSCheck
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.format import format
 
 def get_lzo_packages(stack_version_unformatted):
@@ -35,9 +35,9 @@ def get_lzo_packages(stack_version_unformatted):
     
   underscored_version = stack_version_unformatted.replace('.', '_')
   dashed_version = stack_version_unformatted.replace('.', '-')
-  hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+  stack_version_formatted = format_stack_version(stack_version_unformatted)
 
-  if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+  if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2') >= 0:
     lzo_packages += ["hadooplzo_*"]
   else:
     lzo_packages += ["hadoop-lzo"]

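The gate above is the whole point of the hunk: on HDP 2.2 and later the
versioned hadooplzo_* package is installed, otherwise the legacy hadoop-lzo
name is used. A runnable sketch of that check follows; the compare_versions
stand-in below is a simplification for illustration, not the library
implementation from resource_management.libraries.functions.version.

    def compare_versions(v1, v2):
      # simplified stand-in: compare dotted version strings numerically
      n1 = [int(x) for x in v1.split(".")]
      n2 = [int(x) for x in v2.split(".")]
      return (n1 > n2) - (n1 < n2)

    def pick_lzo_package(stack_version_formatted):
      # HDP 2.2+ ships versioned hadooplzo_* packages; older stacks use hadoop-lzo
      if stack_version_formatted != "" and compare_versions(stack_version_formatted, "2.2") >= 0:
        return "hadooplzo_*"
      return "hadoop-lzo"

    assert pick_lzo_package("2.3") == "hadooplzo_*"
    assert pick_lzo_package("2.1") == "hadoop-lzo"
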
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
new file mode 100644
index 0000000..c00b541
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/get_stack_version.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+__all__ = ["get_stack_version"]
+
+import os
+import re
+
+from ambari_commons import OSConst
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core import shell
+
+STACK_SELECT_BINARY = "/usr/bin/hdp-select"
+
+@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
+def get_stack_version(package_name):
+  """
+  @param package_name: name of the package from which the function will try to get the stack version
+  """
+  try:
+    component_home_dir = os.environ[package_name.upper() + "_HOME"]
+  except KeyError:
+    Logger.info('Skipping get_stack_version since the component {0} is not yet available'.format(package_name))
+    return None # lazy fail
+
+  # As a rule, component_home_dir is of the form <stack_root_dir>\<component_versioned_subdir>, possibly with doubled or trailing backslashes
+  home_dir_split = os.path.split(component_home_dir)
+  iSubdir = len(home_dir_split) - 1
+  while not home_dir_split[iSubdir]:
+    iSubdir -= 1
+
+  #The component subdir is expected to be of the form <package_name>-<package_version>.<stack_version>
+  # with package_version = #.#.# and stack_version=#.#.#.#-<build_number>
+  match = re.findall(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', home_dir_split[iSubdir])
+  if not match:
+    Logger.info('Failed to get extracted version for component {0}. Home dir not in expected format.'.format(package_name))
+    return None # lazy fail
+
+  return match[0]
+
+@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
+def get_stack_version(package_name):
+  """
+  @param package_name: name of the package from which the function will try to get the stack version
+  """
+  
+  if not os.path.exists(STACK_SELECT_BINARY):
+    Logger.info('Skipping get_stack_version since ' + STACK_SELECT_BINARY + ' is not yet available')
+    return None # lazy fail
+  
+  try:
+    command = 'ambari-python-wrap {STACK_SELECT_BINARY} status {package_name}'.format(STACK_SELECT_BINARY=STACK_SELECT_BINARY, package_name=package_name)
+    return_code, stack_output = shell.call(command, timeout=20)
+  except Exception, e:
+    Logger.error(str(e))
+    raise Fail('Unable to execute ' + STACK_SELECT_BINARY + ' command to retrieve the version.')
+
+  if return_code != 0:
+    raise Fail(
+      'Unable to determine the current version because of a non-zero return code of {0}'.format(str(return_code)))
+
+  stack_version = re.sub(package_name + ' - ', '', stack_output)
+  stack_version = stack_version.rstrip()
+  match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', stack_version)
+
+  if match is None:
+    Logger.info('Failed to get extracted version with ' + STACK_SELECT_BINARY)
+    return None # lazy fail
+
+  return stack_version

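The non-Windows implementation above strips the "<package> - " prefix from the
hdp-select status line and then validates the remainder against the
#.#.#.#-<build> shape. A self-contained sketch of that parse (with the string
handling fixed as above), assuming output of the form
"hadoop-client - 2.3.0.0-1234":

    import re

    def parse_stack_version(package_name, stack_output):
      # "hdp-select status <pkg>" prints e.g. "hadoop-client - 2.3.0.0-1234\n";
      # drop the "<pkg> - " prefix, then require #.#.#.#-<build>
      stack_version = re.sub(package_name + ' - ', '', stack_output)
      stack_version = stack_version.rstrip()
      match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', stack_version)
      return stack_version if match else None

    assert parse_stack_version('hadoop-client', 'hadoop-client - 2.3.0.0-1234\n') == '2.3.0.0-1234'
    assert parse_stack_version('hadoop-client', 'hadoop-client - None\n') is None
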
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
deleted file mode 100644
index f5ad7e2..0000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import sys
-import re
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.script.script import Script
-from resource_management.core.shell import call
-from resource_management.libraries.functions.version import format_hdp_stack_version
-from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
-
-HDP_SELECT = '/usr/bin/hdp-select'
-HDP_SELECT_PREFIX = ('ambari-python-wrap', HDP_SELECT)
-
-# hdp-select set oozie-server 2.2.0.0-1234
-TEMPLATE = HDP_SELECT_PREFIX + ('set',)
-
-# a mapping of Ambari server role to hdp-select component name for all
-# non-clients
-SERVER_ROLE_DIRECTORY_MAP = {
-  'ACCUMULO_MASTER' : 'accumulo-master',
-  'ACCUMULO_MONITOR' : 'accumulo-monitor',
-  'ACCUMULO_GC' : 'accumulo-gc',
-  'ACCUMULO_TRACER' : 'accumulo-tracer',
-  'ACCUMULO_TSERVER' : 'accumulo-tablet',
-  'ATLAS_SERVER' : 'atlas-server',
-  'FLUME_HANDLER' : 'flume-server',
-  'FALCON_SERVER' : 'falcon-server',
-  'NAMENODE' : 'hadoop-hdfs-namenode',
-  'DATANODE' : 'hadoop-hdfs-datanode',
-  'SECONDARY_NAMENODE' : 'hadoop-hdfs-secondarynamenode',
-  'NFS_GATEWAY' : 'hadoop-hdfs-nfs3',
-  'JOURNALNODE' : 'hadoop-hdfs-journalnode',
-  'HBASE_MASTER' : 'hbase-master',
-  'HBASE_REGIONSERVER' : 'hbase-regionserver',
-  'HIVE_METASTORE' : 'hive-metastore',
-  'HIVE_SERVER' : 'hive-server2',
-  'WEBHCAT_SERVER' : 'hive-webhcat',
-  'KAFKA_BROKER' : 'kafka-broker',
-  'KNOX_GATEWAY' : 'knox-server',
-  'OOZIE_SERVER' : 'oozie-server',
-  'RANGER_ADMIN' : 'ranger-admin',
-  'RANGER_USERSYNC' : 'ranger-usersync',
-  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
-  'SPARK_THRIFTSERVER' : 'spark-thriftserver',
-  'NIMBUS' : 'storm-nimbus',
-  'SUPERVISOR' : 'storm-supervisor',
-  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
-  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
-  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
-  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
-  'ZOOKEEPER_SERVER' : 'zookeeper-server',
-
-  # ZKFC is tied to NN since it doesn't have its own componnet in hdp-select and there is
-  # a requirement that the ZKFC is installed on each NN
-  'ZKFC' : 'hadoop-hdfs-namenode'
-}
-
-# mapping of service check to hdp-select component
-SERVICE_CHECK_DIRECTORY_MAP = {
-  "HDFS_SERVICE_CHECK" : "hadoop-client",
-  "TEZ_SERVICE_CHECK" : "hadoop-client",
-  "PIG_SERVICE_CHECK" : "hadoop-client",
-  "HIVE_SERVICE_CHECK" : "hadoop-client",
-  "OOZIE_SERVICE_CHECK" : "hadoop-client",
-  "MAHOUT_SERVICE_CHECK" : "mahout-client"
-}
-
-# /usr/hdp/current/hadoop-client/[bin|sbin|libexec|lib]
-# /usr/hdp/2.3.0.0-1234/hadoop/[bin|sbin|libexec|lib]
-HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"
-
-# /usr/hdp/current/hadoop-client
-# /usr/hdp/2.3.0.0-1234/hadoop
-HADOOP_HOME_DIR_TEMPLATE = "/usr/hdp/{0}/{1}"
-
-HADOOP_DIR_DEFAULTS = {
-  "home": "/usr/lib/hadoop",
-  "libexec": "/usr/lib/hadoop/libexec",
-  "sbin": "/usr/lib/hadoop/sbin",
-  "bin": "/usr/bin",
-  "lib": "/usr/lib/hadoop/lib"
-}
-
-def select_all(version_to_select):
-  """
-  Executes hdp-select on every component for the specified version. If the value passed in is a
-  stack version such as "2.3", then this will find the latest installed version which
-  could be "2.3.0.0-9999". If a version is specified instead, such as 2.3.0.0-1234, it will use
-  that exact version.
-  :param version_to_select: the version to hdp-select on, such as "2.3" or "2.3.0.0-1234"
-  """
-  # it's an error, but it shouldn't really stop anything from working
-  if version_to_select is None:
-    Logger.error("Unable to execute hdp-select after installing because there was no version specified")
-    return
-
-  Logger.info("Executing hdp-select set all on {0}".format(version_to_select))
-
-  command = format('{sudo} /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^{version_to_select} | tail -1`')
-  only_if_command = format('ls -d /usr/hdp/{version_to_select}*')
-  Execute(command, only_if = only_if_command)
-
-
-def select(component, version):
-  """
-  Executes hdp-select on the specific component and version. Some global
-  variables that are imported via params/status_params/params_linux will need
-  to be recalcuated after the hdp-select. However, python does not re-import
-  existing modules. The only way to ensure that the configuration variables are
-  recalculated is to call reload(...) on each module that has global parameters.
-  After invoking hdp-select, this function will also reload params, status_params,
-  and params_linux.
-  :param component: the hdp-select component, such as oozie-server. If "all", then all components
-  will be updated.
-  :param version: the version to set the component to, such as 2.2.0.0-1234
-  """
-  command = TEMPLATE + (component, version)
-  Execute(command, sudo=True)
-
-  # don't trust the ordering of modules:
-  # 1) status_params
-  # 2) params_linux
-  # 3) params
-  modules = sys.modules
-  param_modules = "status_params", "params_linux", "params"
-  for moduleName in param_modules:
-    if moduleName in modules:
-      module = modules.get(moduleName)
-      reload(module)
-      Logger.info("After {0}, reloaded module {1}".format(command, moduleName))
-
-
-def get_role_component_current_hdp_version():
-  """
-  Gets the current HDP version of the component that this role command is for.
-  :return:  the current HDP version of the specified component or None
-  """
-  hdp_select_component = None
-  role = default("/role", "")
-  role_command =  default("/roleCommand", "")
-
-  if role in SERVER_ROLE_DIRECTORY_MAP:
-    hdp_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
-  elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
-    hdp_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]
-
-  if hdp_select_component is None:
-    return None
-
-  current_hdp_version = get_hdp_version(hdp_select_component)
-
-  if current_hdp_version is None:
-    Logger.warning("Unable to determine hdp-select version for {0}".format(
-      hdp_select_component))
-  else:
-    Logger.info("{0} is currently at version {1}".format(
-      hdp_select_component, current_hdp_version))
-
-  return current_hdp_version
-
-
-def get_hadoop_dir(target, force_latest_on_upgrade=False):
-  """
-  Return the hadoop shared directory in the following override order
-  1. Use default for 2.1 and lower
-  2. If 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}
-  3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}.
-  However, if the upgrade has not yet invoked hdp-select, return the current
-  version of the component.
-  :target: the target directory
-  :force_latest_on_upgrade: if True, then this will return the "current" directory
-  without the HDP version built into the path, such as /usr/hdp/current/hadoop-client
-  """
-
-  if not target in HADOOP_DIR_DEFAULTS:
-    raise Fail("Target {0} not defined".format(target))
-
-  hadoop_dir = HADOOP_DIR_DEFAULTS[target]
-
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
-    # home uses a different template
-    if target == "home":
-      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", "hadoop-client")
-    else:
-      hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
-
-    # if we are not forcing "current" for HDP 2.2, then attempt to determine
-    # if the exact version needs to be returned in the directory
-    if not force_latest_on_upgrade:
-      stack_info = _get_upgrade_stack()
-
-      if stack_info is not None:
-        stack_version = stack_info[1]
-
-        # determine if hdp-select has been run and if not, then use the current
-        # hdp version until this component is upgraded
-        current_hdp_version = get_role_component_current_hdp_version()
-        if current_hdp_version is not None and stack_version != current_hdp_version:
-          stack_version = current_hdp_version
-
-        if target == "home":
-          # home uses a different template
-          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
-        else:
-          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
-
-  return hadoop_dir
-
-def get_hadoop_dir_for_stack_version(target, stack_version):
-  """
-  Return the hadoop shared directory for the provided stack version. This is necessary
-  when folder paths of downgrade-source stack-version are needed after hdp-select. 
-  :target: the target directory
-  :stack_version: stack version to get hadoop dir for
-  """
-
-  if not target in HADOOP_DIR_DEFAULTS:
-    raise Fail("Target {0} not defined".format(target))
-
-  hadoop_dir = HADOOP_DIR_DEFAULTS[target]
-
-  formatted_stack_version = format_hdp_stack_version(stack_version)
-  if Script.is_hdp_stack_greater_or_equal_to(formatted_stack_version, "2.2"):
-    # home uses a different template
-    if target == "home":
-      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
-    else:
-      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
-
-  return hadoop_dir
-
-
-def _get_upgrade_stack():
-  """
-  Gets the stack name and stack version if an upgrade is currently in progress.
-  :return:  the stack name and stack version as a tuple, or None if an
-  upgrade is not in progress.
-  """
-  from resource_management.libraries.functions.default import default
-  direction = default("/commandParams/upgrade_direction", None)
-  stack_name = default("/hostLevelParams/stack_name", None)
-  stack_version = default("/commandParams/version", None)
-
-  if direction and stack_name and stack_version:
-    return (stack_name, stack_version)
-
-  return None
-
-
-def get_hdp_versions(stack_root):
-  """
-  Gets list of stack versions installed on the host.
-  Be default a call to hdp-select versions is made to get the list of installed stack versions.
-  As a fallback list of installed versions is collected from stack version directories in stack install root.
-  :param stack_root: Stack install root
-  :return: Returns list of installed stack versions.
-  """
-  code, out = call(HDP_SELECT_PREFIX + ('versions',))
-  versions = []
-  if 0 == code:
-    for line in out.splitlines():
-      versions.append(line.rstrip('\n'))
-  if not versions:
-    versions = get_versions_from_stack_root(stack_root)
-  return versions
-
-def get_hdp_version_before_install(component_name):
-  """
-  Works in the similar way to 'hdp-select status component', 
-  but also works for not yet installed packages.
-  
-  Note: won't work if doing initial install.
-  """
-  component_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", component_name)
-  if os.path.islink(component_dir):
-    hdp_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))
-    match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', hdp_version)
-    if match is None:
-      Logger.info('Failed to get extracted version with hdp-select in method get_hdp_version_before_install')
-      return None # lazy fail
-    return hdp_version
-  else:
-    return None

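Most of the deleted module is bookkeeping around two path templates. As a
condensed sketch of the resolution get_hadoop_dir performed for HDP 2.2+
(omitting the pre-2.2 defaults and the upgrade/downgrade handling), using the
same templates as above:

    HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"
    HADOOP_HOME_DIR_TEMPLATE = "/usr/hdp/{0}/{1}"

    def hadoop_dir_for(target, stack_version=None):
      # "current" symlink when no explicit version is pinned, otherwise the
      # fully qualified /usr/hdp/<version>/hadoop/<target> path
      if stack_version is None:
        if target == "home":
          return HADOOP_HOME_DIR_TEMPLATE.format("current", "hadoop-client")
        return HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
      if target == "home":
        return HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
      return HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)

    assert hadoop_dir_for("bin") == "/usr/hdp/current/hadoop-client/bin"
    assert hadoop_dir_for("bin", "2.3.0.0-1234") == "/usr/hdp/2.3.0.0-1234/hadoop/bin"
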
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py b/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py
deleted file mode 100644
index 7e94b5d..0000000
--- a/ambari-common/src/main/python/resource_management/libraries/functions/install_hdp_msi.py
+++ /dev/null
@@ -1,215 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from ambari_commons import os_utils
-from ambari_commons.inet_utils import download_file
-from ambari_commons.os_windows import SystemWideLock
-
-from resource_management.core.resources.system import Execute
-from resource_management.core.resources.system import File
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import Fail
-from resource_management.libraries.functions.reload_windows_env import reload_windows_env
-from resource_management.libraries.functions.windows_service_utils import check_windows_service_exists
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-import socket
-import os
-import glob
-import urlparse
-
-
-__all__ = ['install_windows_msi']
-
-msi_save_dir = None
-hdp_log_dir = "c:\\hadoop\\logs"
-hdp_data_dir = "c:\\hadoopDefaultData"
-local_host = socket.getfqdn()
-db_flavor = "DERBY"
-hdp_22 = """#Namenode Data directory
-HDFS_NAMENODE_DATA_DIR={hdp_data_dir}\\hdpdatann
-
-#Datanode Data directory
-HDFS_DATANODE_DATA_DIR={hdp_data_dir}\\hdpdatadn
-
-IS_SLIDER=yes
-IS_PHOENIX=no
-"""
-cluster_properties = """#Log directory
-HDP_LOG_DIR={hdp_log_dir}
-
-#Data directory
-HDP_DATA_DIR={hdp_data_dir}
-
-{hdp_22_specific_props}
-
-#hosts
-NAMENODE_HOST={local_host}
-SECONDARY_NAMENODE_HOST={local_host}
-RESOURCEMANAGER_HOST={local_host}
-HIVE_SERVER_HOST={local_host}
-OOZIE_SERVER_HOST={local_host}
-WEBHCAT_HOST={local_host}
-SLAVE_HOSTS={local_host}
-ZOOKEEPER_HOSTS={local_host}
-CLIENT_HOSTS={local_host}
-HBASE_MASTER={local_host}
-HBASE_REGIONSERVERS={local_host}
-FLUME_HOSTS={local_host}
-FALCON_HOST={local_host}
-KNOX_HOST={local_host}
-STORM_NIMBUS={local_host}
-STORM_SUPERVISORS={local_host}
-
-#Database host
-DB_FLAVOR={db_flavor}
-DB_HOSTNAME={local_host}
-DB_PORT=1527
-
-#Hive properties
-HIVE_DB_NAME=hive
-HIVE_DB_USERNAME=hive
-HIVE_DB_PASSWORD=hive
-
-#Oozie properties
-OOZIE_DB_NAME=oozie
-OOZIE_DB_USERNAME=oozie
-OOZIE_DB_PASSWORD=oozie
-"""
-
-INSTALL_MSI_CMD = 'cmd /C start /wait msiexec /qn /i  {hdp_msi_path} /lv {hdp_log_path} MSIUSEREALADMINDETECTION=1 ' \
-                  'HDP_LAYOUT={hdp_layout_path} DESTROY_DATA=yes HDP_USER={hadoop_user} HDP_USER_PASSWORD={hadoop_password_arg} HDP=yes ' \
-                  'KNOX=yes KNOX_MASTER_SECRET="AmbariHDP2Windows" FALCON=yes STORM=yes HBase=yes STORM=yes FLUME=yes SLIDER=yes PHOENIX=no RANGER=no'
-CREATE_SERVICE_SCRIPT = os.path.abspath("sbin\createservice.ps1")
-CREATE_SERVICE_CMD = 'cmd /C powershell -ExecutionPolicy Bypass -File "{script}" -username {username} -password "{password}" -servicename ' \
-                     '{servicename} -hdpresourcesdir "{resourcedir}" -servicecmdpath "{servicecmd}"'
-INSTALL_MARKER_OK = "msi.installed"
-INSTALL_MARKER_FAILED = "msi.failed"
-_working_dir = None
-
-
-def _ensure_services_created(hadoop_user, hadoop_password):
-  resource_dir_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin")
-  service_cmd_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin", "hdfs.cmd")
-  if not check_windows_service_exists("journalnode"):
-    Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, username=hadoop_user, password=hadoop_password, servicename="journalnode",
-                                      resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
-  if not check_windows_service_exists("zkfc"):
-    Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, username=hadoop_user, password=hadoop_password, servicename="zkfc",
-                                      resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
-
-
-# creating symlinks to services folders to avoid using stack-dependent paths
-def _create_symlinks(stack_version):
-  # folders
-  Execute("cmd /c mklink /d %HADOOP_NODE%\\hadoop %HADOOP_HOME%")
-  Execute("cmd /c mklink /d %HADOOP_NODE%\\hive %HIVE_HOME%")
-  hdp_stack_version = format_hdp_stack_version(stack_version)
-  if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-    Execute("cmd /c mklink /d %HADOOP_NODE%\\knox %KNOX_HOME%")
-  # files pairs (symlink_path, path_template_to_target_file), use * to replace file version
-  links_pairs = [
-    ("%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming.jar",
-     "%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming-*.jar"),
-    ("%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat.jar",
-     "%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat-*.jar"),
-    ("%HIVE_HOME%\\lib\\zookeeper.jar", "%HIVE_HOME%\\lib\\zookeeper-*.jar")
-  ]
-  for link_pair in links_pairs:
-    link, target = link_pair
-    target = glob.glob(os.path.expandvars(target))[0].replace("\\\\", "\\")
-    Execute('cmd /c mklink "{0}" "{1}"'.format(link, target))
-
-
-# check if services exists and marker file present
-def _is_msi_installed():
-  return os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_OK)) and check_windows_service_exists("namenode")
-
-
-# check if msi was installed correctly and raise Fail in case of broken install
-def _validate_msi_install():
-  if not _is_msi_installed() and os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_FAILED)):
-    raise Fail("Current or previous hdp.msi install failed. Check hdp.msi install logs")
-  return _is_msi_installed()
-
-
-def _write_marker():
-  if check_windows_service_exists("namenode"):
-    open(os.path.join(_working_dir, INSTALL_MARKER_OK), "w").close()
-  else:
-    open(os.path.join(_working_dir, INSTALL_MARKER_FAILED), "w").close()
-
-
-def install_windows_msi(url_base, save_dir, save_files, hadoop_user, hadoop_password, stack_version):
-  global _working_dir
-  _working_dir = save_dir
-  save_dir = os.path.abspath(save_dir)
-  msi_save_dir = save_dir
-  # system wide lock to prevent simultaneous installations(when first task failed on timeout)
-  install_lock = SystemWideLock("Global\\hdp_msi_lock")
-  try:
-    # try to acquire lock
-    if not install_lock.lock():
-      Logger.info("Some other task currently installing hdp.msi, waiting for 10 min for finish")
-      if not install_lock.lock(600000):
-        raise Fail("Timeout on acquiring lock")
-    if _validate_msi_install():
-      Logger.info("hdp.msi already installed")
-      return
-
-    hdp_stack_version = format_hdp_stack_version(stack_version)
-    hdp_22_specific_props = ''
-    if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
-      hdp_22_specific_props = hdp_22.format(hdp_data_dir=hdp_data_dir)
-
-    # MSIs cannot be larger than 2GB. HDPWIN 2.3 needed split in order to accommodate this limitation
-    hdp_msi_file = ''
-    for save_file in save_files:
-      if save_file.lower().endswith(".msi"):
-        hdp_msi_file = save_file
-      file_url = urlparse.urljoin(url_base, save_file)
-      try:
-        download_file(file_url, os.path.join(msi_save_dir, save_file))
-      except:
-        raise Fail("Failed to download {url}".format(url=file_url))
-
-    File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(hdp_log_dir=hdp_log_dir,
-                                                                                         hdp_data_dir=hdp_data_dir,
-                                                                                         local_host=local_host,
-                                                                                         db_flavor=db_flavor,
-                                                                                         hdp_22_specific_props=hdp_22_specific_props))
-
-    # install msi
-    hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file))
-    hdp_log_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file[:-3] + "log"))
-    hdp_layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
-    hadoop_password_arg = os_utils.quote_path(hadoop_password)
-
-    Execute(
-      INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path, hdp_log_path=hdp_log_path, hdp_layout_path=hdp_layout_path,
-                             hadoop_user=hadoop_user, hadoop_password_arg=hadoop_password_arg))
-    reload_windows_env()
-    # create additional services manually due to hdp.msi limitaitons
-    _ensure_services_created(hadoop_user, hadoop_password)
-    _create_symlinks(stack_version)
-    # finalizing install
-    _write_marker()
-    _validate_msi_install()
-  finally:
-    install_lock.unlock()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py b/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
new file mode 100644
index 0000000..f1cd9cb
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/install_windows_msi.py
@@ -0,0 +1,215 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from ambari_commons import os_utils
+from ambari_commons.inet_utils import download_file
+from ambari_commons.os_windows import SystemWideLock
+
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.libraries.functions.reload_windows_env import reload_windows_env
+from resource_management.libraries.functions.windows_service_utils import check_windows_service_exists
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+import socket
+import os
+import glob
+import urlparse
+
+
+__all__ = ['install_windows_msi']
+
+msi_save_dir = None
+hdp_log_dir = "c:\\hadoop\\logs"
+hdp_data_dir = "c:\\hadoopDefaultData"
+local_host = socket.getfqdn()
+db_flavor = "DERBY"
+hdp_22 = """#Namenode Data directory
+HDFS_NAMENODE_DATA_DIR={hdp_data_dir}\\hdpdatann
+
+#Datanode Data directory
+HDFS_DATANODE_DATA_DIR={hdp_data_dir}\\hdpdatadn
+
+IS_SLIDER=yes
+IS_PHOENIX=no
+"""
+cluster_properties = """#Log directory
+HDP_LOG_DIR={hdp_log_dir}
+
+#Data directory
+HDP_DATA_DIR={hdp_data_dir}
+
+{hdp_22_specific_props}
+
+#hosts
+NAMENODE_HOST={local_host}
+SECONDARY_NAMENODE_HOST={local_host}
+RESOURCEMANAGER_HOST={local_host}
+HIVE_SERVER_HOST={local_host}
+OOZIE_SERVER_HOST={local_host}
+WEBHCAT_HOST={local_host}
+SLAVE_HOSTS={local_host}
+ZOOKEEPER_HOSTS={local_host}
+CLIENT_HOSTS={local_host}
+HBASE_MASTER={local_host}
+HBASE_REGIONSERVERS={local_host}
+FLUME_HOSTS={local_host}
+FALCON_HOST={local_host}
+KNOX_HOST={local_host}
+STORM_NIMBUS={local_host}
+STORM_SUPERVISORS={local_host}
+
+#Database host
+DB_FLAVOR={db_flavor}
+DB_HOSTNAME={local_host}
+DB_PORT=1527
+
+#Hive properties
+HIVE_DB_NAME=hive
+HIVE_DB_USERNAME=hive
+HIVE_DB_PASSWORD=hive
+
+#Oozie properties
+OOZIE_DB_NAME=oozie
+OOZIE_DB_USERNAME=oozie
+OOZIE_DB_PASSWORD=oozie
+"""
+
+INSTALL_MSI_CMD = 'cmd /C start /wait msiexec /qn /i  {hdp_msi_path} /lv {hdp_log_path} MSIUSEREALADMINDETECTION=1 ' \
+                  'HDP_LAYOUT={hdp_layout_path} DESTROY_DATA=yes HDP_USER={hadoop_user} HDP_USER_PASSWORD={hadoop_password_arg} HDP=yes ' \
+                  'KNOX=yes KNOX_MASTER_SECRET="AmbariHDP2Windows" FALCON=yes STORM=yes HBase=yes STORM=yes FLUME=yes SLIDER=yes PHOENIX=no RANGER=no'
+CREATE_SERVICE_SCRIPT = os.path.abspath("sbin\createservice.ps1")
+CREATE_SERVICE_CMD = 'cmd /C powershell -ExecutionPolicy Bypass -File "{script}" -username {username} -password "{password}" -servicename ' \
+                     '{servicename} -hdpresourcesdir "{resourcedir}" -servicecmdpath "{servicecmd}"'
+INSTALL_MARKER_OK = "msi.installed"
+INSTALL_MARKER_FAILED = "msi.failed"
+_working_dir = None
+
+
+def _ensure_services_created(hadoop_user, hadoop_password):
+  resource_dir_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin")
+  service_cmd_hdfs = os.path.join(os.environ["HADOOP_HDFS_HOME"], "bin", "hdfs.cmd")
+  if not check_windows_service_exists("journalnode"):
+    Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, username=hadoop_user, password=hadoop_password, servicename="journalnode",
+                                      resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
+  if not check_windows_service_exists("zkfc"):
+    Execute(CREATE_SERVICE_CMD.format(script=CREATE_SERVICE_SCRIPT, username=hadoop_user, password=hadoop_password, servicename="zkfc",
+                                      resourcedir=resource_dir_hdfs, servicecmd=service_cmd_hdfs), logoutput=True)
+
+
+# creating symlinks to services folders to avoid using stack-dependent paths
+def _create_symlinks(stack_version):
+  # folders
+  Execute("cmd /c mklink /d %HADOOP_NODE%\\hadoop %HADOOP_HOME%")
+  Execute("cmd /c mklink /d %HADOOP_NODE%\\hive %HIVE_HOME%")
+  stack_version_formatted = format_stack_version(stack_version)
+  if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2') >= 0:
+    Execute("cmd /c mklink /d %HADOOP_NODE%\\knox %KNOX_HOME%")
+  # files pairs (symlink_path, path_template_to_target_file), use * to replace file version
+  links_pairs = [
+    ("%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming.jar",
+     "%HADOOP_HOME%\\share\\hadoop\\tools\\lib\\hadoop-streaming-*.jar"),
+    ("%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat.jar",
+     "%HIVE_HOME%\\hcatalog\\share\\webhcat\\svr\\lib\\hive-webhcat-*.jar"),
+    ("%HIVE_HOME%\\lib\\zookeeper.jar", "%HIVE_HOME%\\lib\\zookeeper-*.jar")
+  ]
+  for link_pair in links_pairs:
+    link, target = link_pair
+    target = glob.glob(os.path.expandvars(target))[0].replace("\\\\", "\\")
+    Execute('cmd /c mklink "{0}" "{1}"'.format(link, target))
+
+
+# check if the services exist and the marker file is present
+def _is_msi_installed():
+  return os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_OK)) and check_windows_service_exists("namenode")
+
+
+# check if msi was installed correctly and raise Fail in case of broken install
+def _validate_msi_install():
+  if not _is_msi_installed() and os.path.exists(os.path.join(_working_dir, INSTALL_MARKER_FAILED)):
+    raise Fail("Current or previous hdp.msi install failed. Check hdp.msi install logs")
+  return _is_msi_installed()
+
+
+def _write_marker():
+  if check_windows_service_exists("namenode"):
+    open(os.path.join(_working_dir, INSTALL_MARKER_OK), "w").close()
+  else:
+    open(os.path.join(_working_dir, INSTALL_MARKER_FAILED), "w").close()
+
+
+def install_windows_msi(url_base, save_dir, save_files, hadoop_user, hadoop_password, stack_version):
+  global _working_dir
+  _working_dir = save_dir
+  save_dir = os.path.abspath(save_dir)
+  msi_save_dir = save_dir
+  # system-wide lock to prevent simultaneous installations (e.g. when the first task failed on a timeout)
+  install_lock = SystemWideLock("Global\\hdp_msi_lock")
+  try:
+    # try to acquire lock
+    if not install_lock.lock():
+      Logger.info("Some other task is currently installing hdp.msi, waiting up to 10 min for it to finish")
+      if not install_lock.lock(600000):
+        raise Fail("Timeout on acquiring lock")
+    if _validate_msi_install():
+      Logger.info("hdp.msi already installed")
+      return
+
+    stack_version_formatted = format_stack_version(stack_version)
+    hdp_22_specific_props = ''
+    if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2') >= 0:
+      hdp_22_specific_props = hdp_22.format(hdp_data_dir=hdp_data_dir)
+
+    # MSIs cannot be larger than 2GB. HDPWIN 2.3 needed to be split to accommodate this limitation
+    hdp_msi_file = ''
+    for save_file in save_files:
+      if save_file.lower().endswith(".msi"):
+        hdp_msi_file = save_file
+      file_url = urlparse.urljoin(url_base, save_file)
+      try:
+        download_file(file_url, os.path.join(msi_save_dir, save_file))
+      except:
+        raise Fail("Failed to download {url}".format(url=file_url))
+
+    File(os.path.join(msi_save_dir, "properties.txt"), content=cluster_properties.format(hdp_log_dir=hdp_log_dir,
+                                                                                         hdp_data_dir=hdp_data_dir,
+                                                                                         local_host=local_host,
+                                                                                         db_flavor=db_flavor,
+                                                                                         hdp_22_specific_props=hdp_22_specific_props))
+
+    # install msi
+    hdp_msi_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file))
+    hdp_log_path = os_utils.quote_path(os.path.join(save_dir, hdp_msi_file[:-3] + "log"))
+    hdp_layout_path = os_utils.quote_path(os.path.join(save_dir, "properties.txt"))
+    hadoop_password_arg = os_utils.quote_path(hadoop_password)
+
+    Execute(
+      INSTALL_MSI_CMD.format(hdp_msi_path=hdp_msi_path, hdp_log_path=hdp_log_path, hdp_layout_path=hdp_layout_path,
+                             hadoop_user=hadoop_user, hadoop_password_arg=hadoop_password_arg))
+    reload_windows_env()
+    # create additional services manually due to hdp.msi limitations
+    _ensure_services_created(hadoop_user, hadoop_password)
+    _create_symlinks(stack_version)
+    # finalizing install
+    _write_marker()
+    _validate_msi_install()
+  finally:
+    install_lock.unlock()

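The installer above is made idempotent with a system-wide lock plus ok/failed
marker files. A generic sketch of the marker pattern only (paths are
hypothetical; the Windows-specific lock is omitted):

    import os

    INSTALL_MARKER_OK = "msi.installed"
    INSTALL_MARKER_FAILED = "msi.failed"

    def write_marker(working_dir, succeeded):
      # an empty file records the outcome so later runs can short-circuit
      name = INSTALL_MARKER_OK if succeeded else INSTALL_MARKER_FAILED
      open(os.path.join(working_dir, name), "w").close()

    def already_installed(working_dir):
      return os.path.exists(os.path.join(working_dir, INSTALL_MARKER_OK))

    def previous_attempt_failed(working_dir):
      return os.path.exists(os.path.join(working_dir, INSTALL_MARKER_FAILED))
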
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
index e5e4266..4d9d8a4 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin.py
@@ -24,7 +24,7 @@ from datetime import datetime
 from resource_management.libraries.functions.ranger_functions import Rangeradmin
 from resource_management.core.resources import File, Execute
 from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
 from resource_management.core.logger import Logger
 from resource_management.core.source import DownloadSource
 from resource_management.libraries.resources import ModifyPropertiesFile
@@ -50,8 +50,8 @@ def setup_ranger_plugin(component_select_name, service_name,
 
   File(driver_curl_target, mode=0644)
 
-  hdp_version = get_hdp_version(component_select_name)
-  file_path = format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/install.properties')
+  stack_version = get_stack_version(component_select_name)
+  file_path = format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/install.properties')
   
   if not os.path.isfile(file_path):
     raise Fail(format('Ranger {service_name} plugin install.properties file does not exist at {file_path}'))
@@ -79,7 +79,7 @@ def setup_ranger_plugin(component_select_name, service_name,
   else:
     cmd = (format('disable-{service_name}-plugin.sh'),)
     
-  cmd_env = {'JAVA_HOME': java_home, 'PWD': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin'), 'PATH': format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin')}
+  cmd_env = {'JAVA_HOME': java_home, 'PWD': format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin'), 'PATH': format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin')}
   
   Execute(cmd, 
         environment=cmd_env, 

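Note that the file_path and cmd_env lines rely on Ambari's format() helper,
which interpolates {stack_version} and {service_name} from the caller's scope.
A plain-Python equivalent of the path construction, with hypothetical values:

    stack_version = "2.3.0.0-1234"  # e.g. from get_stack_version(component_select_name)
    service_name = "hdfs"

    file_path = ("/usr/hdp/{stack_version}/ranger-{service_name}-plugin/install.properties"
                 .format(stack_version=stack_version, service_name=service_name))

    assert file_path == "/usr/hdp/2.3.0.0-1234/ranger-hdfs-plugin/install.properties"
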
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
index d6f6deb..2ccc0c6 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
@@ -26,7 +26,7 @@ from resource_management.libraries.functions.ranger_functions import Rangeradmin
 from resource_management.core.resources import File, Directory, Execute
 from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
 from resource_management.core.logger import Logger
 from resource_management.core.source import DownloadSource, InlineTemplate
 from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
@@ -44,7 +44,7 @@ def setup_ranger_plugin(component_select_name, service_name,
                         plugin_policymgr_ssl_properties, plugin_policymgr_ssl_attributes,
                         component_list, audit_db_is_enabled, credential_file, 
                         xa_audit_db_password, ssl_truststore_password,
-                        ssl_keystore_password, api_version=None, hdp_version_override = None, skip_if_rangeradmin_down = True):
+                        ssl_keystore_password, api_version=None, stack_version_override = None, skip_if_rangeradmin_down = True):
 
   if audit_db_is_enabled:
     File(component_downloaded_custom_connector,
@@ -59,9 +59,9 @@ def setup_ranger_plugin(component_select_name, service_name,
 
     File(component_driver_curl_target, mode=0644)
 
-  hdp_version = get_hdp_version(component_select_name)
-  if hdp_version_override is not None:
-    hdp_version = hdp_version_override
+  stack_version = get_stack_version(component_select_name)
+  if stack_version_override is not None:
+    stack_version = stack_version_override
 
   component_conf_dir = conf_dict
   
@@ -135,9 +135,9 @@ def setup_ranger_plugin(component_select_name, service_name,
         mode=0744) 
 
     #This should be done by rpm
-    #setup_ranger_plugin_jar_symblink(hdp_version, service_name, component_list)
+    #setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)
 
-    setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, hdp_version, credential_file,
+    setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file,
               xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
               component_user, component_group, java_home)
 
@@ -147,22 +147,22 @@ def setup_ranger_plugin(component_select_name, service_name,
     )    
 
 
-def setup_ranger_plugin_jar_symblink(hdp_version, service_name, component_list):
+def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):
 
-  jar_files = os.listdir(format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/lib'))
+  jar_files = os.listdir(format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib'))
 
   for jar_file in jar_files:
     for component in component_list:
-      Execute(('ln','-sf',format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/lib/{jar_file}'),format('/usr/hdp/current/{component}/lib/{jar_file}')),
+      Execute(('ln','-sf',format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),format('/usr/hdp/current/{component}/lib/{jar_file}')),
       not_if=format('ls /usr/hdp/current/{component}/lib/{jar_file}'),
-      only_if=format('ls /usr/hdp/{hdp_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
+      only_if=format('ls /usr/hdp/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
       sudo=True)
 
-def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, hdp_version, credential_file, xa_audit_db_password,
+def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
                                 ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home):
 
-  cred_lib_path = format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/install/lib/*')
-  cred_setup_prefix = (format('/usr/hdp/{hdp_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
+  cred_lib_path = format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
+  cred_setup_prefix = (format('/usr/hdp/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
 
   if audit_db_is_enabled:
     cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')

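The symlink loop in setup_ranger_plugin_jar_symblink is guarded to be
idempotent: a link is created only when the plugin jar exists and the target
does not (the only_if/not_if pair above). A pure-Python sketch of the same
guard; the real code shells out via Execute with sudo:

    import os

    def link_plugin_jars(plugin_lib_dir, component_lib_dir):
      # mirror ranger plugin jars into the component's lib dir,
      # skipping links that already exist
      for jar_file in os.listdir(plugin_lib_dir):
        source = os.path.join(plugin_lib_dir, jar_file)
        target = os.path.join(component_lib_dir, jar_file)
        if os.path.exists(source) and not os.path.exists(target):
          os.symlink(source, target)
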

[28/51] [abbrv] ambari git commit: AMBARI-14830. Client names differ on host details page and filter. (xiwang via yusaku)

Posted by jl...@apache.org.
AMBARI-14830. Client names differ on host details page and filter. (xiwang via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ec4b1d14
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ec4b1d14
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ec4b1d14

Branch: refs/heads/AMBARI-13364
Commit: ec4b1d14bbd5d38c9e4c42cab1c9b6b37730f762
Parents: 249014b
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Tue Mar 8 19:05:56 2016 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Tue Mar 8 19:05:56 2016 -0800

----------------------------------------------------------------------
 ambari-web/app/models/service.js       |  2 +-
 ambari-web/app/utils/ember_computed.js |  4 ++--
 ambari-web/app/utils/helper.js         | 13 +++++++++++--
 3 files changed, 14 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ec4b1d14/ambari-web/app/models/service.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/models/service.js b/ambari-web/app/models/service.js
index c1ed776..8546812 100644
--- a/ambari-web/app/models/service.js
+++ b/ambari-web/app/models/service.js
@@ -22,7 +22,7 @@ require('utils/config');
 
 App.Service = DS.Model.extend({
   serviceName: DS.attr('string'),
-  displayName: Em.computed.formatRole('serviceName'),
+  displayName: Em.computed.formatRole('serviceName', 'SERVICE'),
   passiveState: DS.attr('string'),
   workStatus: DS.attr('string'),
   rand: DS.attr('string'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/ec4b1d14/ambari-web/app/utils/ember_computed.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/ember_computed.js b/ambari-web/app/utils/ember_computed.js
index e3a2f63..ecab9a2 100644
--- a/ambari-web/app/utils/ember_computed.js
+++ b/ambari-web/app/utils/ember_computed.js
@@ -887,10 +887,10 @@ computed.percents = function (dependentKey1, dependentKey2, accuracy) {
  * @param {string} dependentKey
  * @returns {Ember.ComputedProperty}
  */
-computed.formatRole = function (dependentKey) {
+computed.formatRole = function (dependentKey, level) {
   return computed(dependentKey, function () {
     var value = get(this, dependentKey);
-    return App.format.role(value);
+    return App.format.role(value, level);
   });
 };
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ec4b1d14/ambari-web/app/utils/helper.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/utils/helper.js b/ambari-web/app/utils/helper.js
index eb73667..ec2dc42 100644
--- a/ambari-web/app/utils/helper.js
+++ b/ambari-web/app/utils/helper.js
@@ -535,8 +535,17 @@ App.format = {
    * @param {string} role
    * return {string}
    */
-  role: function (role) {
-    var models = [App.StackService, App.StackServiceComponent];
+  role: function (role, level) {
+    switch (level) {
+      case 'SERVICE':
+        var models = [App.StackService];
+        break;
+      case 'COMPONENT':
+        var models = [App.StackServiceComponent];
+        break;
+      default:
+        var models = [App.StackService, App.StackServiceComponent];
+    }
 
     if (App.isEmptyObject(this.stackRolesMap)) {
       models.forEach(function (model) {


[10/51] [abbrv] ambari git commit: AMBARI-15314. Introduce possibility to retry stack installation in case of network instability (Dmytro Grinenko via alejandro)

Posted by jl...@apache.org.
AMBARI-15314. Introduce possibility to retry stack installation in case of network instability (Dmytro Grinenko via alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7e81d376
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7e81d376
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7e81d376

Branch: refs/heads/AMBARI-13364
Commit: 7e81d37669aefa4aea6c1ab8f1aebbb1e23768d9
Parents: 549e70e
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Mar 7 20:53:55 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Mar 7 20:53:55 2016 -0800

----------------------------------------------------------------------
 .../resource_management/TestPackageResource.py  |   6 +
 .../python/resource_management/TestScript.py    |  12 +-
 .../src/main/python/ambari_commons/str_utils.py |  18 ++
 .../core/providers/package/__init__.py          |  73 +++--
 .../core/providers/package/apt.py               |  11 +-
 .../core/providers/package/yumrpm.py            |   6 +-
 .../core/providers/package/zypper.py            |  11 +-
 .../core/resources/packaging.py                 |  12 +-
 .../libraries/script/script.py                  |   8 +-
 ambari-server/conf/unix/ambari.properties       |   5 +
 .../ambari/server/agent/ExecutionCommand.java   |   2 +
 .../ambari/server/agent/HeartbeatMonitor.java   |   2 -
 .../server/configuration/Configuration.java     | 270 ++++++++++---------
 .../controller/AmbariActionExecutionHelper.java |   8 +-
 .../AmbariCustomCommandExecutionHelper.java     |   4 +
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py   |   4 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  |   4 +
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py      |   4 +-
 .../2.1.0.2.0/package/scripts/params_linux.py   |   3 +
 .../0.12.0.2.0/package/scripts/params_linux.py  |   3 +
 .../package/scripts/setup_atlas_hive.py         |   2 +-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |   4 +-
 .../4.0.0.2.0/package/scripts/params_linux.py   |   3 +
 .../custom_actions/scripts/install_packages.py  |  15 +-
 .../0.8/hooks/before-INSTALL/scripts/params.py  |   4 +
 .../scripts/shared_initialization.py            |   5 +-
 .../hooks/before-INSTALL/scripts/params.py      |   4 +
 .../scripts/shared_initialization.py            |   4 +-
 .../AmbariManagementControllerImplTest.java     |   2 +-
 ...ClusterStackVersionResourceProviderTest.java |   3 +
 .../custom_actions/TestInstallPackages.py       |  64 ++---
 .../configs/install_packages_config.json        |   2 +
 .../stacks/2.0.6/HBASE/test_hbase_master.py     |  12 +-
 .../2.0.6/HBASE/test_hbase_regionserver.py      |   2 +-
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json   |   2 +
 .../stacks/2.0.6/configs/client-upgrade.json    |   2 +
 .../2.0.6/configs/default.hbasedecom.json       |   2 +
 .../python/stacks/2.0.6/configs/default.json    |   5 +-
 .../stacks/2.0.6/configs/default_client.json    |   2 +
 .../2.0.6/configs/default_hive_nn_ha.json       |   2 +
 .../2.0.6/configs/default_hive_nn_ha_2.json     |   2 +
 .../2.0.6/configs/default_hive_non_hdfs.json    |   2 +
 .../2.0.6/configs/default_no_install.json       |   2 +
 .../2.0.6/configs/default_oozie_mysql.json      |   2 +
 .../default_update_exclude_file_only.json       |   2 +
 .../2.0.6/configs/ha_bootstrap_active_node.json |   2 +
 .../configs/ha_bootstrap_standby_node.json      |   2 +
 ...ha_bootstrap_standby_node_initial_start.json |   2 +
 .../python/stacks/2.0.6/configs/ha_default.json |   2 +
 .../python/stacks/2.0.6/configs/ha_secured.json |   2 +
 .../python/stacks/2.0.6/configs/hbase-2.2.json  |   2 +
 .../stacks/2.0.6/configs/hbase-check-2.2.json   |   2 +
 .../stacks/2.0.6/configs/hbase-preupgrade.json  |   2 +
 .../2.0.6/configs/hbase-rs-2.2-phoenix.json     |   2 +
 .../stacks/2.0.6/configs/hbase-rs-2.2.json      |   2 +
 .../stacks/2.0.6/configs/hbase_no_phx.json      |   2 +
 .../stacks/2.0.6/configs/hbase_with_phx.json    |   2 +
 .../test/python/stacks/2.0.6/configs/nn_eu.json |   2 +
 .../stacks/2.0.6/configs/nn_eu_standby.json     |   2 +
 .../python/stacks/2.0.6/configs/nn_ru_lzo.json  |   2 +
 .../2.0.6/configs/oozie_existing_sqla.json      |   2 +
 .../2.0.6/configs/ranger-namenode-start.json    |   2 +
 .../2.0.6/configs/rebalancehdfs_default.json    |   2 +
 .../2.0.6/configs/rebalancehdfs_secured.json    |   2 +
 .../python/stacks/2.0.6/configs/secured.json    |   2 +
 .../stacks/2.0.6/configs/secured_client.json    |   2 +
 .../hooks/before-INSTALL/test_before_install.py |   4 +-
 .../stacks/2.1/configs/client-upgrade.json      |   2 +
 .../test/python/stacks/2.1/configs/default.json |   2 +
 .../2.1/configs/hive-metastore-upgrade.json     |   2 +
 .../test/python/stacks/2.1/configs/secured.json |   2 +
 .../test/python/stacks/2.2/configs/default.json |   2 +
 .../python/stacks/2.2/configs/hive-upgrade.json |   2 +
 .../journalnode-upgrade-hdfs-secure.json        |   2 +
 .../stacks/2.2/configs/journalnode-upgrade.json |   2 +
 .../stacks/2.2/configs/oozie-downgrade.json     |   2 +
 .../stacks/2.2/configs/oozie-upgrade.json       |   2 +
 .../test/python/stacks/2.2/configs/secured.json |   2 +
 .../src/test/python/stacks/2.3/PXF/test_pxf.py  |  20 +-
 .../stacks/2.3/configs/default.hbasedecom.json  |   2 +
 .../test/python/stacks/2.3/configs/default.json |   2 +
 .../stacks/2.3/configs/hbase_default.json       |   2 +
 .../python/stacks/2.3/configs/hbase_secure.json |   2 +
 .../python/stacks/2.3/configs/pxf_default.json  |   2 +
 84 files changed, 488 insertions(+), 227 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-agent/src/test/python/resource_management/TestPackageResource.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestPackageResource.py b/ambari-agent/src/test/python/resource_management/TestPackageResource.py
index 1d8ef01..1f2250d 100644
--- a/ambari-agent/src/test/python/resource_management/TestPackageResource.py
+++ b/ambari-agent/src/test/python/resource_management/TestPackageResource.py
@@ -93,6 +93,7 @@ class TestPackageResource(TestCase):
   @patch.object(shell, "checked_call")
   @patch.object(System, "os_family", new = 'redhat')
   def test_action_install_rhel(self, shell_mock):
+    shell_mock.return_value = (0,'')
     sys.modules['rpm'] = MagicMock()
     sys.modules['rpm'].TransactionSet.return_value = MagicMock()
     sys.modules['rpm'].TransactionSet.return_value.dbMatch.return_value = [{'name':'some_packag'}]
@@ -106,6 +107,7 @@ class TestPackageResource(TestCase):
   @patch.object(shell, "checked_call")
   @patch.object(System, "os_family", new = 'redhat')
   def test_action_install_pattern_rhel(self, shell_mock):
+    shell_mock.return_value = (0,'')
     sys.modules['rpm'] = MagicMock()
     sys.modules['rpm'].TransactionSet.return_value = MagicMock()
     sys.modules['rpm'].TransactionSet.return_value.dbMatch.return_value = [{'name':'some_packag'}]
@@ -118,12 +120,14 @@ class TestPackageResource(TestCase):
   @patch.object(shell, "checked_call")
   @patch.object(System, "os_family", new = 'redhat')
   def test_action_install_pattern_installed_rhel(self, shell_mock):
+    shell_mock.return_value = (0,'')
     sys.modules['yum'] = MagicMock()
     sys.modules['yum'].YumBase.return_value = MagicMock()
     sys.modules['yum'].YumBase.return_value.rpmdb = MagicMock()
     sys.modules['yum'].YumBase.return_value.rpmdb.simplePkgList.return_value = [('some_package_1_2_3',)]
     with Environment('/') as env:
       Package("some_package*",
+              logoutput = False
       )
     self.assertEqual(shell_mock.call_count, 0, "shell.checked_call shouldn't be called")
 
@@ -179,6 +183,7 @@ class TestPackageResource(TestCase):
   @patch.object(shell, "checked_call")
   @patch.object(System, "os_family", new = 'redhat')
   def test_action_install_use_repos_rhel(self, shell_mock):
+    shell_mock.return_value = (0,'')
     with Environment('/') as env:
       Package("some_package", use_repos=['HDP-UTILS-2.2.0.1-885', 'HDP-2.2.0.1-885'],
               logoutput = False
@@ -233,6 +238,7 @@ class TestPackageResource(TestCase):
   @patch.object(shell, "checked_call")
   @patch.object(System, "os_family", new = 'redhat')
   def test_action_install_version_attr(self, shell_mock):
+    shell_mock.return_value = (0,'')
     with Environment('/') as env:
       Package("some_package",
               version = "3.5.0",

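The shell_mock.return_value = (0, '') lines added above are not cosmetic: the reworked retry wrapper in PackageProvider (see the resource_management diff below) unpacks the mocked result as code, out, and a bare MagicMock return value iterates as an empty sequence, so the unpacking fails. A minimal sketch of the pitfall:

    from mock import MagicMock   # unittest.mock on Python 3

    m = MagicMock()
    # code, out = m()           # ValueError: a MagicMock yields an empty sequence
    m.return_value = (0, '')
    code, out = m()             # fine: code == 0, out == ''
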
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-agent/src/test/python/resource_management/TestScript.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/resource_management/TestScript.py b/ambari-agent/src/test/python/resource_management/TestScript.py
index f6a5c8c..adb8501 100644
--- a/ambari-agent/src/test/python/resource_management/TestScript.py
+++ b/ambari-agent/src/test/python/resource_management/TestScript.py
@@ -48,13 +48,17 @@ class TestScript(TestCase):
   def test_install_packages(self, package_provider_mock):
     no_packages_config = {
       'hostLevelParams' : {
-        'repo_info' : "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+        'repo_info' : "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
+        'agent_stack_retry_count': '5',
+        'agent_stack_retry_on_unavailability': 'false'
       }
     }
     empty_config = {
       'hostLevelParams' : {
         'package_list' : '',
-        'repo_info' : "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+        'repo_info' : "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
+        'agent_stack_retry_count': '5',
+        'agent_stack_retry_on_unavailability': 'false'
       }
     }
     dummy_config = {
@@ -62,7 +66,9 @@ class TestScript(TestCase):
         'package_list' : "[{\"type\":\"rpm\",\"name\":\"hbase\", \"condition\": \"\"},"
                          "{\"type\":\"rpm\",\"name\":\"yet-another-package\", \"condition\": \"\"}]",
         'repo_info' : "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
-        'service_repo_info' : "[{\"mirrorsList\":\"abc\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
+        'service_repo_info' : "[{\"mirrorsList\":\"abc\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]",
+        'agent_stack_retry_count': '5',
+        'agent_stack_retry_on_unavailability': 'false'
       }
     }
 

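For reference, the fixtures above model the string-typed host-level parameters the server now sends alongside package_list; a trimmed sketch of the shape install_packages consumes:

    config = {
      'hostLevelParams': {
        'package_list': '[{"type":"rpm","name":"hbase","condition":""}]',
        'agent_stack_retry_count': '5',                  # serialized as a string
        'agent_stack_retry_on_unavailability': 'false'   # serialized as a string
      }
    }
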
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-common/src/main/python/ambari_commons/str_utils.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/ambari_commons/str_utils.py b/ambari-common/src/main/python/ambari_commons/str_utils.py
index 538d7c6..1a3e4f2 100644
--- a/ambari-common/src/main/python/ambari_commons/str_utils.py
+++ b/ambari-common/src/main/python/ambari_commons/str_utils.py
@@ -18,17 +18,20 @@ See the License for the specific language governing permissions and
 limitations under the License.
 '''
 
+
 def compress_backslashes(s):
   s1 = s
   while (-1 != s1.find('\\\\')):
     s1 = s1.replace('\\\\', '\\')
   return s1
 
+
 def ensure_double_backslashes(s):
   s1 = compress_backslashes(s)
   s2 = s1.replace('\\', '\\\\')
   return s2
 
+
 def cbool(obj):
   """
   Interprets an object as a boolean value.
@@ -44,3 +47,18 @@ def cbool(obj):
     raise ValueError('Unable to interpret value "%s" as boolean' % obj)
   return bool(obj)
 
+
+def cint(obj):
+  """
+  Interprets an object as an integer value.
+  :param obj:
+  :return:
+  """
+  if isinstance(obj, str):
+    obj = obj.strip().lower()
+    try:
+      return int(obj)
+    except ValueError:
+      raise ValueError('Unable to interpret value "%s" as integer' % obj)
+  return int(obj)
+

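A quick usage sketch of the two converters (cbool's handling of "true"/"false" strings is implied by its docstring and error message; only its tail is visible in the hunk above):

    from ambari_commons.str_utils import cbool, cint

    cint(' 5 ')      # -> 5, surrounding whitespace is stripped first
    cint('five')     # raises ValueError
    cbool('false')   # -> False, unlike the built-in bool('false'), which is True
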
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py b/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py
index 1fc4214..04da9b6 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/__init__.py
@@ -31,7 +31,10 @@ from resource_management.core.logger import Logger
 from resource_management.core.utils import suppress_stdout
 from resource_management.core import shell
 
-PACKAGE_MANAGER_LOCK_ACQUIRED = "Package manager lock is acquired. Retrying after {0} seconds. Reason: {1}"
+
+PACKAGE_MANAGER_LOCK_ACQUIRED_MSG = "Cannot obtain lock for Package manager. Retrying after {0} seconds. Reason: {1}"
+PACKAGE_MANAGER_REPO_ERROR_MSG = "Cannot download the package due to repository unavailability. Retrying after {0} seconds. Reason: {1}"
+
 
 class PackageProvider(Provider):
   def __init__(self, *args, **kwargs):
@@ -39,8 +42,10 @@ class PackageProvider(Provider):
   
   def install_package(self, name, version):
     raise NotImplementedError()
+
   def remove_package(self, name):
     raise NotImplementedError()
+
   def upgrade_package(self, name, version):
     raise NotImplementedError()
 
@@ -61,41 +66,63 @@ class PackageProvider(Provider):
       return self.resource.package_name + '-' + self.resource.version
     else:
       return self.resource.package_name
-    
+
+  def is_locked_output(self, out):
+    return False
+
+  def is_repo_error_output(self, out):
+    return False
+
   def get_logoutput(self):
     return self.resource.logoutput==True and Logger.logger.isEnabledFor(logging.INFO) or self.resource.logoutput==None and Logger.logger.isEnabledFor(logging.DEBUG)
-    
-  def call_until_not_locked(self, cmd, **kwargs):
-    return self.wait_until_not_locked(cmd, is_checked=False, **kwargs)
+
+  def call_with_retries(self, cmd, **kwargs):
+    return self._call_with_retries(cmd, is_checked=False, **kwargs)
   
-  def checked_call_until_not_locked(self, cmd, **kwargs):
-    return self.wait_until_not_locked(cmd, is_checked=True, **kwargs)
-    
-  def wait_until_not_locked(self, cmd, is_checked=True, **kwargs):
+  def checked_call_with_retries(self, cmd, **kwargs):
+    return self._call_with_retries(cmd, is_checked=True, **kwargs)
+
+  def _call_with_retries(self, cmd, is_checked=True, **kwargs):
     func = shell.checked_call if is_checked else shell.call
-      
-    for i in range(self.resource.locked_tries):
-      is_last_time = (i == self.resource.locked_tries - 1)
+
+    for i in range(self.resource.retry_count):
+      is_last_time = (i == self.resource.retry_count - 1)
       try:
         code, out = func(cmd, **kwargs)
       except Fail as ex:
         # non-lock error
-        if not self.is_locked_output(str(ex)) or is_last_time:
+        if not self._is_handled_error(str(ex), is_last_time) or is_last_time:
           raise
-        
-        Logger.info(PACKAGE_MANAGER_LOCK_ACQUIRED.format(self.resource.locked_try_sleep, str(ex)))
+
+        self._notify_about_handled_error(str(ex), is_last_time)
       else:
         # didn't fail or failed with non-lock error.
-        if not code or not self.is_locked_output(out):
-           break
-         
-        Logger.info(PACKAGE_MANAGER_LOCK_ACQUIRED.format(self.resource.locked_try_sleep, str(out)))
-      
-      time.sleep(self.resource.locked_try_sleep)
+        if not code or not self._is_handled_error(out, is_last_time):
+          break
+
+        self._notify_about_handled_error(str(out), is_last_time)
+
+      time.sleep(self.resource.retry_sleep)
 
     return code, out
-       
-    
+
+  def _is_handled_error(self, output, is_last_time):
+    if self.resource.retry_on_locked and self.is_locked_output(output):
+      return True
+    elif self.resource.retry_on_repo_unavailability and self.is_repo_error_output(output):
+      return True
+
+    return False
+
+  def _notify_about_handled_error(self, output, is_last_time):
+    if is_last_time:
+      return
+
+    if self.resource.retry_on_locked and self.is_locked_output(output):
+      Logger.info(PACKAGE_MANAGER_LOCK_ACQUIRED_MSG.format(self.resource.retry_sleep, str(output)))
+    elif self.resource.retry_on_repo_unavailability and self.is_repo_error_output(output):
+      Logger.info(PACKAGE_MANAGER_REPO_ERROR_MSG.format(self.resource.retry_sleep, str(output)))
+
   def yum_check_package_available(self, name):
     """
     Does the same as rpm_check_package_avaiable, but faster.

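Stripped of the resource plumbing, the loop above boils down to the following pattern (a simplified sketch, not the exact provider code):

    import time

    def call_with_retries(call, cmd, retry_count=4, retry_sleep=30,
                          is_handled_error=lambda output: False):
        # Retry while the failure looks transient (locked manager, dead mirror);
        # anything else is re-raised or returned immediately.
        for i in range(retry_count):
            is_last_time = (i == retry_count - 1)
            try:
                code, out = call(cmd)
            except Exception as ex:
                if not is_handled_error(str(ex)) or is_last_time:
                    raise                 # non-transient error, or out of tries
            else:
                if not code or not is_handled_error(out):
                    break                 # success, or a non-transient failure
            time.sleep(retry_sleep)
        return code, out

The two retry_on_* flags on the Package resource (see packaging.py below) simply decide which classifiers, is_locked_output and is_repo_error_output, feed is_handled_error.
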
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-common/src/main/python/resource_management/core/providers/package/apt.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/apt.py b/ambari-common/src/main/python/resource_management/core/providers/package/apt.py
index ea8ad98..476e39b 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/apt.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/apt.py
@@ -78,7 +78,7 @@ class AptProvider(PackageProvider):
 
       cmd = cmd + [name]
       Logger.info("Installing package %s ('%s')" % (name, string_cmd_from_args_list(cmd)))
-      code, out = self.call_until_not_locked(cmd, sudo=True, env=INSTALL_CMD_ENV, logoutput=self.get_logoutput())
+      code, out = self.call_with_retries(cmd, sudo=True, env=INSTALL_CMD_ENV, logoutput=self.get_logoutput())
       
       if self.is_locked_output(out):
         err_msg = Logger.filter_text("Execution of '%s' returned %d. %s" % (cmd, code, out))
@@ -88,13 +88,13 @@ class AptProvider(PackageProvider):
       if code:
         Logger.info("Execution of '%s' returned %d. %s" % (cmd, code, out))
         Logger.info("Failed to install package %s. Executing `%s`" % (name, string_cmd_from_args_list(REPO_UPDATE_CMD)))
-        code, out = self.call_until_not_locked(REPO_UPDATE_CMD, sudo=True, logoutput=self.get_logoutput())
+        code, out = self.call_with_retries(REPO_UPDATE_CMD, sudo=True, logoutput=self.get_logoutput())
         
         if code:
           Logger.info("Execution of '%s' returned %d. %s" % (REPO_UPDATE_CMD, code, out))
           
         Logger.info("Retrying to install package %s" % (name))
-        self.checked_call_until_not_locked(cmd, sudo=True, env=INSTALL_CMD_ENV, logoutput=self.get_logoutput())
+        self.checked_call_with_retries(cmd, sudo=True, env=INSTALL_CMD_ENV, logoutput=self.get_logoutput())
 
       if is_tmp_dir_created:
         for temporal_sources_file in copied_sources_files:
@@ -108,6 +108,9 @@ class AptProvider(PackageProvider):
   def is_locked_output(self, out):
     return "Unable to lock the administration directory" in out
 
+  def is_repo_error_output(self, out):
+    return "Failure when receiving data from the peer" in out
+
   @replace_underscores
   def upgrade_package(self, name, use_repos=[], skip_repos=[], is_upgrade=True):
     return self.install_package(name, use_repos, skip_repos, is_upgrade)
@@ -117,7 +120,7 @@ class AptProvider(PackageProvider):
     if self._check_existence(name):
       cmd = REMOVE_CMD[self.get_logoutput()] + [name]
       Logger.info("Removing package %s ('%s')" % (name, string_cmd_from_args_list(cmd)))
-      self.checked_call_until_not_locked(cmd, sudo=True, logoutput=self.get_logoutput())
+      self.checked_call_with_retries(cmd, sudo=True, logoutput=self.get_logoutput())
     else:
       Logger.info("Skipping removal of non-existing package %s" % (name))
 
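The apt path layers one extra fallback on top of those retries: if the install still fails, it refreshes the package metadata and tries once more, with the last attempt made as a checked call so that failure raises Fail. In outline (reusing the call_with_retries sketch above; INSTALL_CMD and REPO_UPDATE_CMD stand in for the provider's real command lists):

    def apt_install(name):
        code, out = call_with_retries(shell.call, INSTALL_CMD + [name])
        if code:
            # 'apt-get update', itself retried on lock/repo errors
            call_with_retries(shell.call, REPO_UPDATE_CMD)
            # final attempt: checked_call raises Fail on a non-zero exit code
            call_with_retries(shell.checked_call, INSTALL_CMD + [name])
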

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
index ea86395..0739f66 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/yumrpm.py
@@ -46,7 +46,7 @@ class YumProvider(PackageProvider):
         cmd = cmd + [disable_repo_option, enable_repo_option]
       cmd = cmd + [name]
       Logger.info("Installing package %s ('%s')" % (name, string_cmd_from_args_list(cmd)))
-      shell.checked_call(cmd, sudo=True, logoutput=self.get_logoutput())
+      self.checked_call_with_retries(cmd, sudo=True, logoutput=self.get_logoutput())
     else:
       Logger.info("Skipping installation of existing package %s" % (name))
 
@@ -61,6 +61,10 @@ class YumProvider(PackageProvider):
     else:
       Logger.info("Skipping removal of non-existing package %s" % (name))
 
+  def is_repo_error_output(self, out):
+    return "Failure when receiving data from the peer" in out or \
+           "No more mirrors to try" in out
+
   def _check_existence(self, name):
     """
     For regexp names:

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py b/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
index d0f3198..2d00b0d 100644
--- a/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
+++ b/ambari-common/src/main/python/resource_management/core/providers/package/zypper.py
@@ -54,7 +54,7 @@ class ZypperProvider(PackageProvider):
 
       cmd = cmd + [name]
       Logger.info("Installing package %s ('%s')" % (name, string_cmd_from_args_list(cmd)))
-      self.checked_call_until_not_locked(cmd, sudo=True, logoutput=self.get_logoutput())
+      self.checked_call_with_retries(cmd, sudo=True, logoutput=self.get_logoutput())
     else:
       Logger.info("Skipping installation of existing package %s" % (name))
 
@@ -65,12 +65,12 @@ class ZypperProvider(PackageProvider):
     if self._check_existence(name):
       cmd = REMOVE_CMD[self.get_logoutput()] + [name]
       Logger.info("Removing package %s ('%s')" % (name, string_cmd_from_args_list(cmd)))
-      self.checked_call_until_not_locked(cmd, sudo=True, logoutput=self.get_logoutput())
+      self.checked_call_with_retries(cmd, sudo=True, logoutput=self.get_logoutput())
     else:
       Logger.info("Skipping removal of non-existing package %s" % (name))
       
   def get_active_base_repos(self):
-    (code, output) = self.call_until_not_locked(LIST_ACTIVE_REPOS_CMD)
+    (code, output) = self.call_with_retries(LIST_ACTIVE_REPOS_CMD)
     enabled_repos = []
     if not code:
       for line in output.split('\n')[2:]:
@@ -81,9 +81,12 @@ class ZypperProvider(PackageProvider):
           return [line_list[1].strip()]
     return enabled_repos
       
-  def is_locked_output(self ,out):
+  def is_locked_output(self, out):
     return "System management is locked by the application" in out
 
+  def is_repo_error_output(self, out):
+    return "Failure when receiving data from the peer" in out
+
   def _check_existence(self, name):
     """
     For regexp names:

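Each platform provider opts into the shared retry loop by overriding the two classifier hooks; the base class answers False for both, so unrecognized output never triggers a retry. The pattern, condensed from the base class and yumrpm.py:

    class PackageProvider(object):
        def is_locked_output(self, out):
            return False
        def is_repo_error_output(self, out):
            return False

    class YumProvider(PackageProvider):
        def is_repo_error_output(self, out):
            return ("Failure when receiving data from the peer" in out
                    or "No more mirrors to try" in out)
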
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-common/src/main/python/resource_management/core/resources/packaging.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/core/resources/packaging.py b/ambari-common/src/main/python/resource_management/core/resources/packaging.py
index bb0aa56..e3adc30 100644
--- a/ambari-common/src/main/python/resource_management/core/resources/packaging.py
+++ b/ambari-common/src/main/python/resource_management/core/resources/packaging.py
@@ -22,7 +22,7 @@ Ambari Agent
 
 __all__ = ["Package"]
 
-from resource_management.core.base import Resource, ForcedListArgument, ResourceArgument
+from resource_management.core.base import Resource, ForcedListArgument, ResourceArgument, BooleanArgument
 
 
 class Package(Resource):
@@ -41,11 +41,13 @@ class Package(Resource):
   logoutput = ResourceArgument(default=None)
   
   """
-  Retry if package manager is locked. (usually another process is running).
-  Note that this works only for apt-get and zypper, while yum manages lock retries itself.
+  Retry if package manager is locked or unavailable.
+  Note that retry_on_locked works only for apt-get and zypper, while yum manages lock retries itself.
   """
-  locked_tries = ResourceArgument(default=8)
-  locked_try_sleep = ResourceArgument(default=30) # seconds
+  retry_count = ResourceArgument(default=4)
+  retry_sleep = ResourceArgument(default=30)
+  retry_on_repo_unavailability = BooleanArgument(default=False)
+  retry_on_locked = BooleanArgument(default=True)
 
   version = ResourceArgument()
   actions = ["install", "upgrade", "remove"]

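A usage sketch of the renamed arguments (hadoop-client is a placeholder package name):

    Package("hadoop-client",
            retry_count=5,                      # was locked_tries; default drops from 8 to 4
            retry_sleep=30,                     # was locked_try_sleep, in seconds
            retry_on_repo_unavailability=True,  # new, off by default
            retry_on_locked=True)               # new flag for the old behavior, on by default
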
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 3bef342..5e76562 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -432,6 +432,7 @@ class Script(object):
     NOTE: regexes don't have Python syntax, but simple package regexes which support only * and .* and ?
     """
     config = self.get_config()
+
     if 'host_sys_prepped' in config['hostLevelParams']:
       # do not install anything on sys-prepped host
       if config['hostLevelParams']['host_sys_prepped'] == True:
@@ -440,6 +441,9 @@ class Script(object):
       pass
     try:
       package_list_str = config['hostLevelParams']['package_list']
+      agent_stack_retry_on_unavailability = bool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
+      agent_stack_retry_count = int(config['hostLevelParams']['agent_stack_retry_count'])
+
       if isinstance(package_list_str, basestring) and len(package_list_str) > 0:
         package_list = json.loads(package_list_str)
         for package in package_list:
@@ -452,7 +456,9 @@ class Script(object):
               if "ambari-metrics" in name:
                 Package(name)
             else:
-              Package(name)
+              Package(name,
+                      retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
+                      retry_count=agent_stack_retry_count)
     except KeyError:
       pass  # No reason to worry
 

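One caveat in the hunk above: Python's built-in bool() treats any non-empty string as truthy, so bool('false') evaluates to True, while int('5') parses as expected. The cbool/cint helpers added to ambari_commons.str_utils in this same commit parse such strings explicitly; a sketch of the stricter variant (an illustration, not the committed code):

    from ambari_commons.str_utils import cbool, cint

    params = config['hostLevelParams']   # as in the surrounding method
    agent_stack_retry_on_unavailability = cbool(params['agent_stack_retry_on_unavailability'])
    agent_stack_retry_count = cint(params['agent_stack_retry_count'])
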
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/conf/unix/ambari.properties
----------------------------------------------------------------------
diff --git a/ambari-server/conf/unix/ambari.properties b/ambari-server/conf/unix/ambari.properties
index ba5090c..92dec24 100644
--- a/ambari-server/conf/unix/ambari.properties
+++ b/ambari-server/conf/unix/ambari.properties
@@ -75,6 +75,11 @@ agent.task.timeout=900
 # Default timeout in seconds before package installation task is killed
 agent.package.install.task.timeout=1800
 
+# Enables package installation retry on repository unavailability error
+agent.stack.retry.on_repo_unavailability=false
+# Default number of retries
+agent.stack.retry.tries=5
+
 # Default timeout in seconds before a server-side task is killed
 server.task.timeout=1200
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
index a540a36..788e9c3 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/ExecutionCommand.java
@@ -371,6 +371,8 @@ public class ExecutionCommand extends AgentCommand {
     String HOST_SYS_PREPPED = "host_sys_prepped";
     String MAX_DURATION_OF_RETRIES = "max_duration_for_retries";
     String COMMAND_RETRY_ENABLED = "command_retry_enabled";
+    String AGENT_STACK_RETRY_ON_UNAVAILABILITY = "agent_stack_retry_on_unavailability";
+    String AGENT_STACK_RETRY_COUNT = "agent_stack_retry_count";
     /**
      * Comma separated list of config-types whose tags have be refreshed
      * at runtime before being executed. If all config-type tags have to be

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
index 378e123..a902a2c 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
@@ -33,7 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
-import com.google.inject.Inject;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.ActionManager;
@@ -341,7 +340,6 @@ public class HeartbeatMonitor implements Runnable {
     hostLevelParams.put(STACK_NAME, stackId.getStackName());
     hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
 
-
     if (statusCmd.getPayloadLevel() == StatusCommand.StatusCommandPayload.EXECUTION_COMMAND) {
       ExecutionCommand ec = ambariManagementController.getExecutionCommand(cluster, sch, RoleCommand.START);
       statusCmd.setExecutionCommand(ec);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
index 17fb42d..92d4f47 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
@@ -591,8 +591,16 @@ public class Configuration {
    */
   private static final int VERSION_DEFINITION_READ_TIMEOUT_DEFAULT = 5000;
 
+  /**
+   * For Agent Stack Install retry configuration
+   */
+  public static final String AGENT_STACK_RETRY_ON_REPO_UNAVAILABILITY_KEY = "agent.stack.retry.on_repo_unavailability";
+  public static final String AGENT_STACK_RETRY_ON_REPO_UNAVAILABILITY_DEFAULT = "false";
+  public static final String AGENT_STACK_RETRY_COUNT_KEY = "agent.stack.retry.tries";
+  public static final String AGENT_STACK_RETRY_COUNT_DEFAULT = "5";
+
   private static final Logger LOG = LoggerFactory.getLogger(
-      Configuration.class);
+    Configuration.class);
 
   private Properties properties;
   private JsonObject hostChangesJson;
@@ -625,7 +633,7 @@ public class Configuration {
     SQL_ANYWHERE("sqlanywhere");
 
     private static final Map<String, DatabaseType> m_mappedTypes =
-        new HashMap<String, Configuration.DatabaseType>(5);
+      new HashMap<String, Configuration.DatabaseType>(5);
 
     static {
       for (DatabaseType databaseType : EnumSet.allOf(DatabaseType.class)) {
@@ -711,59 +719,59 @@ public class Configuration {
       CHECK_MOUNTS_TIMEOUT_KEY, CHECK_MOUNTS_TIMEOUT_DEFAULT));
 
     agentConfigsMap.put(ENABLE_AUTO_AGENT_CACHE_UPDATE_KEY, properties.getProperty(
-        ENABLE_AUTO_AGENT_CACHE_UPDATE_KEY, ENABLE_AUTO_AGENT_CACHE_UPDATE_DEFAULT));
+      ENABLE_AUTO_AGENT_CACHE_UPDATE_KEY, ENABLE_AUTO_AGENT_CACHE_UPDATE_DEFAULT));
 
     configsMap = new HashMap<String, String>();
     configsMap.putAll(agentConfigsMap);
     configsMap.put(AMBARI_PYTHON_WRAP_KEY, properties.getProperty(
-        AMBARI_PYTHON_WRAP_KEY, AMBARI_PYTHON_WRAP_DEFAULT));
+      AMBARI_PYTHON_WRAP_KEY, AMBARI_PYTHON_WRAP_DEFAULT));
     configsMap.put(SRVR_TWO_WAY_SSL_KEY, properties.getProperty(
-        SRVR_TWO_WAY_SSL_KEY, SRVR_TWO_WAY_SSL_DEFAULT));
+      SRVR_TWO_WAY_SSL_KEY, SRVR_TWO_WAY_SSL_DEFAULT));
     configsMap.put(SRVR_TWO_WAY_SSL_PORT_KEY, properties.getProperty(
-        SRVR_TWO_WAY_SSL_PORT_KEY, SRVR_TWO_WAY_SSL_PORT_DEFAULT));
+      SRVR_TWO_WAY_SSL_PORT_KEY, SRVR_TWO_WAY_SSL_PORT_DEFAULT));
     configsMap.put(SRVR_ONE_WAY_SSL_PORT_KEY, properties.getProperty(
-        SRVR_ONE_WAY_SSL_PORT_KEY, SRVR_ONE_WAY_SSL_PORT_DEFAULT));
+      SRVR_ONE_WAY_SSL_PORT_KEY, SRVR_ONE_WAY_SSL_PORT_DEFAULT));
     configsMap.put(SRVR_KSTR_DIR_KEY, properties.getProperty(
-        SRVR_KSTR_DIR_KEY, SRVR_KSTR_DIR_DEFAULT));
+      SRVR_KSTR_DIR_KEY, SRVR_KSTR_DIR_DEFAULT));
     configsMap.put(SRVR_CRT_NAME_KEY, properties.getProperty(
-        SRVR_CRT_NAME_KEY, SRVR_CRT_NAME_DEFAULT));
+      SRVR_CRT_NAME_KEY, SRVR_CRT_NAME_DEFAULT));
     configsMap.put(SRVR_KEY_NAME_KEY, properties.getProperty(
       SRVR_KEY_NAME_KEY, SRVR_KEY_NAME_DEFAULT));
     configsMap.put(SRVR_CSR_NAME_KEY, properties.getProperty(
       SRVR_CSR_NAME_KEY, SRVR_CSR_NAME_DEFAULT));
     configsMap.put(KSTR_NAME_KEY, properties.getProperty(
-        KSTR_NAME_KEY, KSTR_NAME_DEFAULT));
+      KSTR_NAME_KEY, KSTR_NAME_DEFAULT));
     configsMap.put(KSTR_TYPE_KEY, properties.getProperty(
-        KSTR_TYPE_KEY, KSTR_TYPE_DEFAULT));
+      KSTR_TYPE_KEY, KSTR_TYPE_DEFAULT));
     configsMap.put(TSTR_NAME_KEY, properties.getProperty(
-        TSTR_NAME_KEY, TSTR_NAME_DEFAULT));
+      TSTR_NAME_KEY, TSTR_NAME_DEFAULT));
     configsMap.put(TSTR_TYPE_KEY, properties.getProperty(
-        TSTR_TYPE_KEY, TSTR_TYPE_DEFAULT));
+      TSTR_TYPE_KEY, TSTR_TYPE_DEFAULT));
     configsMap.put(SRVR_CRT_PASS_FILE_KEY, properties.getProperty(
-        SRVR_CRT_PASS_FILE_KEY, SRVR_CRT_PASS_FILE_DEFAULT));
+      SRVR_CRT_PASS_FILE_KEY, SRVR_CRT_PASS_FILE_DEFAULT));
     configsMap.put(PASSPHRASE_ENV_KEY, properties.getProperty(
-        PASSPHRASE_ENV_KEY, PASSPHRASE_ENV_DEFAULT));
+      PASSPHRASE_ENV_KEY, PASSPHRASE_ENV_DEFAULT));
     configsMap.put(PASSPHRASE_KEY, System.getenv(configsMap.get(
-        PASSPHRASE_ENV_KEY)));
+      PASSPHRASE_ENV_KEY)));
     configsMap.put(RESOURCES_DIR_KEY, properties.getProperty(
-        RESOURCES_DIR_KEY, RESOURCES_DIR_DEFAULT));
+      RESOURCES_DIR_KEY, RESOURCES_DIR_DEFAULT));
     configsMap.put(SRVR_CRT_PASS_LEN_KEY, properties.getProperty(
-        SRVR_CRT_PASS_LEN_KEY, SRVR_CRT_PASS_LEN_DEFAULT));
+      SRVR_CRT_PASS_LEN_KEY, SRVR_CRT_PASS_LEN_DEFAULT));
     configsMap.put(SRVR_DISABLED_CIPHERS, properties.getProperty(
-        SRVR_DISABLED_CIPHERS, SRVR_DISABLED_CIPHERS_DEFAULT));
+      SRVR_DISABLED_CIPHERS, SRVR_DISABLED_CIPHERS_DEFAULT));
     configsMap.put(SRVR_DISABLED_PROTOCOLS, properties.getProperty(
-        SRVR_DISABLED_PROTOCOLS, SRVR_DISABLED_PROTOCOLS_DEFAULT));
+      SRVR_DISABLED_PROTOCOLS, SRVR_DISABLED_PROTOCOLS_DEFAULT));
 
     configsMap.put(CLIENT_API_SSL_KSTR_DIR_NAME_KEY, properties.getProperty(
       CLIENT_API_SSL_KSTR_DIR_NAME_KEY, configsMap.get(SRVR_KSTR_DIR_KEY)));
     configsMap.put(CLIENT_API_SSL_KSTR_NAME_KEY, properties.getProperty(
       CLIENT_API_SSL_KSTR_NAME_KEY, CLIENT_API_SSL_KSTR_NAME_DEFAULT));
     configsMap.put(CLIENT_API_SSL_KSTR_TYPE_KEY, properties.getProperty(
-        CLIENT_API_SSL_KSTR_TYPE_KEY, CLIENT_API_SSL_KSTR_TYPE_DEFAULT));
+      CLIENT_API_SSL_KSTR_TYPE_KEY, CLIENT_API_SSL_KSTR_TYPE_DEFAULT));
     configsMap.put(CLIENT_API_SSL_TSTR_NAME_KEY, properties.getProperty(
-        CLIENT_API_SSL_TSTR_NAME_KEY, CLIENT_API_SSL_TSTR_NAME_DEFAULT));
+      CLIENT_API_SSL_TSTR_NAME_KEY, CLIENT_API_SSL_TSTR_NAME_DEFAULT));
     configsMap.put(CLIENT_API_SSL_TSTR_TYPE_KEY, properties.getProperty(
-        CLIENT_API_SSL_TSTR_TYPE_KEY, CLIENT_API_SSL_TSTR_TYPE_DEFAULT));
+      CLIENT_API_SSL_TSTR_TYPE_KEY, CLIENT_API_SSL_TSTR_TYPE_DEFAULT));
     configsMap.put(CLIENT_API_SSL_CRT_PASS_FILE_NAME_KEY, properties.getProperty(
       CLIENT_API_SSL_CRT_PASS_FILE_NAME_KEY, CLIENT_API_SSL_CRT_PASS_FILE_NAME_DEFAULT));
     configsMap.put(CLIENT_API_SSL_KEY_NAME_KEY, properties.getProperty(
@@ -771,41 +779,41 @@ public class Configuration {
     configsMap.put(CLIENT_API_SSL_CRT_NAME_KEY, properties.getProperty(
       CLIENT_API_SSL_CRT_NAME_KEY, CLIENT_API_SSL_CRT_NAME_DEFAULT));
     configsMap.put(JAVA_HOME_KEY, properties.getProperty(
-        JAVA_HOME_KEY));
+      JAVA_HOME_KEY));
     configsMap.put(PARALLEL_STAGE_EXECUTION_KEY, properties.getProperty(
-            PARALLEL_STAGE_EXECUTION_KEY, PARALLEL_STAGE_EXECUTION_DEFAULT));
+      PARALLEL_STAGE_EXECUTION_KEY, PARALLEL_STAGE_EXECUTION_DEFAULT));
     configsMap.put(SERVER_TMP_DIR_KEY, properties.getProperty(
-            SERVER_TMP_DIR_KEY, SERVER_TMP_DIR_DEFAULT));
+      SERVER_TMP_DIR_KEY, SERVER_TMP_DIR_DEFAULT));
     configsMap.put(EXTERNAL_SCRIPT_TIMEOUT_KEY, properties.getProperty(
-            EXTERNAL_SCRIPT_TIMEOUT_KEY, EXTERNAL_SCRIPT_TIMEOUT_DEFAULT));
+      EXTERNAL_SCRIPT_TIMEOUT_KEY, EXTERNAL_SCRIPT_TIMEOUT_DEFAULT));
 
     configsMap.put(SHARED_RESOURCES_DIR_KEY, properties.getProperty(
-       SHARED_RESOURCES_DIR_KEY, SHARED_RESOURCES_DIR_DEFAULT));
+      SHARED_RESOURCES_DIR_KEY, SHARED_RESOURCES_DIR_DEFAULT));
 
     configsMap.put(KDC_PORT_KEY, properties.getProperty(
-        KDC_PORT_KEY, KDC_PORT_KEY_DEFAULT));
+      KDC_PORT_KEY, KDC_PORT_KEY_DEFAULT));
 
     configsMap.put(AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_KEY, properties.getProperty(
-            AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_KEY, AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_DEFAULT));
+      AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_KEY, AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_DEFAULT));
     configsMap.put(PROXY_ALLOWED_HOST_PORTS, properties.getProperty(
-        PROXY_ALLOWED_HOST_PORTS, PROXY_ALLOWED_HOST_PORTS_DEFAULT));
+      PROXY_ALLOWED_HOST_PORTS, PROXY_ALLOWED_HOST_PORTS_DEFAULT));
 
     File passFile = new File(configsMap.get(SRVR_KSTR_DIR_KEY) + File.separator
-        + configsMap.get(SRVR_CRT_PASS_FILE_KEY));
+      + configsMap.get(SRVR_CRT_PASS_FILE_KEY));
     String password = null;
 
     if (!passFile.exists()) {
       LOG.info("Generation of file with password");
       try {
         password = RandomStringUtils.randomAlphanumeric(Integer
-            .parseInt(configsMap.get(SRVR_CRT_PASS_LEN_KEY)));
+          .parseInt(configsMap.get(SRVR_CRT_PASS_LEN_KEY)));
         FileUtils.writeStringToFile(passFile, password);
         ShellCommandUtil.setUnixFilePermissions(
-               ShellCommandUtil.MASK_OWNER_ONLY_RW, passFile.getAbsolutePath());
+          ShellCommandUtil.MASK_OWNER_ONLY_RW, passFile.getAbsolutePath());
       } catch (IOException e) {
         e.printStackTrace();
         throw new RuntimeException(
-            "Error reading certificate password from file");
+          "Error reading certificate password from file");
       }
     } else {
       LOG.info("Reading password from existing file");
@@ -884,12 +892,12 @@ public class Configuration {
     }
     if (properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY) != null) {
       String ts_password = readPasswordFromStore(
-              properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY));
+        properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY));
       if (ts_password != null) {
         System.setProperty(JAVAX_SSL_TRUSTSTORE_PASSWORD, ts_password);
       } else {
         System.setProperty(JAVAX_SSL_TRUSTSTORE_PASSWORD,
-                properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY));
+          properties.getProperty(SSL_TRUSTSTORE_PASSWORD_KEY));
       }
     }
     if (properties.getProperty(SSL_TRUSTSTORE_TYPE_KEY) != null) {
@@ -901,9 +909,9 @@ public class Configuration {
     if (!credentialProviderInitialized) {
       try {
         credentialProvider = new CredentialProvider(null,
-            getMasterKeyLocation(),
-            isMasterKeyPersisted(),
-            getMasterKeyStoreLocation());
+          getMasterKeyLocation(),
+          isMasterKeyPersisted(),
+          getMasterKeyStoreLocation());
       } catch (Exception e) {
         LOG.info("Credential provider creation failed. Reason: " + e.getMessage());
         if (LOG.isDebugEnabled()) {
@@ -937,7 +945,7 @@ public class Configuration {
       LOG.info("No configuration file " + CONFIG_FILE + " found in classpath.", fnf);
     } catch (IOException ie) {
       throw new IllegalArgumentException("Can't read configuration file " +
-          CONFIG_FILE, ie);
+        CONFIG_FILE, ie);
     }
 
     return properties;
@@ -1023,7 +1031,7 @@ public class Configuration {
 
   public String getBootSetupAgentScript() {
     return properties.getProperty(BOOTSTRAP_SETUP_AGENT_SCRIPT,
-        AmbariPath.getPath("/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py"));
+      AmbariPath.getPath("/usr/lib/python2.6/site-packages/ambari_server/setupAgent.py"));
   }
 
   public String getBootSetupAgentPassword() {
@@ -1056,7 +1064,7 @@ public class Configuration {
    */
   public List<String> getRollingUpgradeSkipPackagesPrefixes() {
     String propertyValue = properties.getProperty(ROLLING_UPGRADE_SKIP_PACKAGES_PREFIXES_KEY,
-            ROLLING_UPGRADE_SKIP_PACKAGES_PREFIXES_DEFAULT);
+      ROLLING_UPGRADE_SKIP_PACKAGES_PREFIXES_DEFAULT);
     ArrayList<String> res = new ArrayList<>();
     for (String prefix : propertyValue.split(",")) {
       if (! prefix.isEmpty()) {
@@ -1114,7 +1122,7 @@ public class Configuration {
   }
 
   public void setLdap(String host, String userClass, String userNameAttr, String groupClass, String groupName, String groupMember,
-      String baseDN, boolean anon, String managerDN, String managerPass) {
+                      String baseDN, boolean anon, String managerDN, String managerPass) {
     properties.setProperty(LDAP_PRIMARY_URL_KEY, host);
     properties.setProperty(LDAP_USER_OBJECT_CLASS_KEY, userClass);
     properties.setProperty(LDAP_USERNAME_ATTRIBUTE_KEY, userNameAttr);
@@ -1192,7 +1200,7 @@ public class Configuration {
    */
   public int getClientSSLApiPort() {
     return Integer.parseInt(properties.getProperty(CLIENT_API_SSL_PORT_KEY,
-                                                   String.valueOf(CLIENT_API_SSL_PORT_DEFAULT)));
+      String.valueOf(CLIENT_API_SSL_PORT_DEFAULT)));
   }
 
   /**
@@ -1322,8 +1330,8 @@ public class Configuration {
    */
   public boolean isApiGzipped() {
     return "true".equalsIgnoreCase(properties.getProperty(
-        API_GZIP_COMPRESSION_ENABLED_KEY,
-        API_GZIP_COMPRESSION_ENABLED_DEFAULT));
+      API_GZIP_COMPRESSION_ENABLED_KEY,
+      API_GZIP_COMPRESSION_ENABLED_DEFAULT));
   }
 
   /**
@@ -1474,16 +1482,16 @@ public class Configuration {
     LdapServerProperties ldapServerProperties = new LdapServerProperties();
 
     ldapServerProperties.setPrimaryUrl(properties.getProperty(
-        LDAP_PRIMARY_URL_KEY, LDAP_PRIMARY_URL_DEFAULT));
+      LDAP_PRIMARY_URL_KEY, LDAP_PRIMARY_URL_DEFAULT));
     ldapServerProperties.setSecondaryUrl(properties.getProperty(
-        LDAP_SECONDARY_URL_KEY));
+      LDAP_SECONDARY_URL_KEY));
     ldapServerProperties.setUseSsl("true".equalsIgnoreCase(properties.
-        getProperty(LDAP_USE_SSL_KEY)));
+      getProperty(LDAP_USE_SSL_KEY)));
     ldapServerProperties.setAnonymousBind("true".
-        equalsIgnoreCase(properties.getProperty(LDAP_BIND_ANONYMOUSLY_KEY,
-            LDAP_BIND_ANONYMOUSLY_DEFAULT)));
+      equalsIgnoreCase(properties.getProperty(LDAP_BIND_ANONYMOUSLY_KEY,
+        LDAP_BIND_ANONYMOUSLY_DEFAULT)));
     ldapServerProperties.setManagerDn(properties.getProperty(
-        LDAP_MANAGER_DN_KEY));
+      LDAP_MANAGER_DN_KEY));
     String ldapPasswordProperty = properties.getProperty(LDAP_MANAGER_PASSWORD_KEY);
     String ldapPassword = null;
     if (CredentialProvider.isAliasString(ldapPasswordProperty)) {
@@ -1497,9 +1505,9 @@ public class Configuration {
       }
     }
     ldapServerProperties.setBaseDN(properties.getProperty
-        (LDAP_BASE_DN_KEY, LDAP_BASE_DN_DEFAULT));
+      (LDAP_BASE_DN_KEY, LDAP_BASE_DN_DEFAULT));
     ldapServerProperties.setUsernameAttribute(properties.
-        getProperty(LDAP_USERNAME_ATTRIBUTE_KEY, LDAP_USERNAME_ATTRIBUTE_DEFAULT));
+      getProperty(LDAP_USERNAME_ATTRIBUTE_KEY, LDAP_USERNAME_ATTRIBUTE_DEFAULT));
 
     ldapServerProperties.setUserBase(properties.getProperty(
       LDAP_USER_BASE_KEY, LDAP_USER_BASE_DEFAULT));
@@ -1509,28 +1517,28 @@ public class Configuration {
       LDAP_DN_ATTRIBUTE_KEY, LDAP_DN_ATTRIBUTE_DEFAULT));
 
     ldapServerProperties.setGroupBase(properties.
-        getProperty(LDAP_GROUP_BASE_KEY, LDAP_GROUP_BASE_DEFAULT));
+      getProperty(LDAP_GROUP_BASE_KEY, LDAP_GROUP_BASE_DEFAULT));
     ldapServerProperties.setGroupObjectClass(properties.
-        getProperty(LDAP_GROUP_OBJECT_CLASS_KEY, LDAP_GROUP_OBJECT_CLASS_DEFAULT));
+      getProperty(LDAP_GROUP_OBJECT_CLASS_KEY, LDAP_GROUP_OBJECT_CLASS_DEFAULT));
     ldapServerProperties.setGroupMembershipAttr(properties.getProperty(
-        LDAP_GROUP_MEMEBERSHIP_ATTR_KEY, LDAP_GROUP_MEMBERSHIP_ATTR_DEFAULT));
+      LDAP_GROUP_MEMEBERSHIP_ATTR_KEY, LDAP_GROUP_MEMBERSHIP_ATTR_DEFAULT));
     ldapServerProperties.setGroupNamingAttr(properties.
-        getProperty(LDAP_GROUP_NAMING_ATTR_KEY, LDAP_GROUP_NAMING_ATTR_DEFAULT));
+      getProperty(LDAP_GROUP_NAMING_ATTR_KEY, LDAP_GROUP_NAMING_ATTR_DEFAULT));
     ldapServerProperties.setAdminGroupMappingRules(properties.getProperty(
-        LDAP_ADMIN_GROUP_MAPPING_RULES_KEY, LDAP_ADMIN_GROUP_MAPPING_RULES_DEFAULT));
+      LDAP_ADMIN_GROUP_MAPPING_RULES_KEY, LDAP_ADMIN_GROUP_MAPPING_RULES_DEFAULT));
     ldapServerProperties.setGroupSearchFilter(properties.getProperty(
-        LDAP_GROUP_SEARCH_FILTER_KEY, LDAP_GROUP_SEARCH_FILTER_DEFAULT));
+      LDAP_GROUP_SEARCH_FILTER_KEY, LDAP_GROUP_SEARCH_FILTER_DEFAULT));
     ldapServerProperties.setReferralMethod(properties.getProperty(
       LDAP_REFERRAL_KEY, LDAP_REFERRAL_DEFAULT));
     ldapServerProperties.setPaginationEnabled("true".equalsIgnoreCase(
       properties.getProperty(LDAP_PAGINATION_ENABLED_KEY, LDAP_PAGINATION_ENABLED_DEFAULT)));
 
     if (properties.containsKey(LDAP_GROUP_BASE_KEY) ||
-        properties.containsKey(LDAP_GROUP_OBJECT_CLASS_KEY) ||
-        properties.containsKey(LDAP_GROUP_MEMEBERSHIP_ATTR_KEY) ||
-        properties.containsKey(LDAP_GROUP_NAMING_ATTR_KEY) ||
-        properties.containsKey(LDAP_ADMIN_GROUP_MAPPING_RULES_KEY) ||
-        properties.containsKey(LDAP_GROUP_SEARCH_FILTER_KEY)) {
+      properties.containsKey(LDAP_GROUP_OBJECT_CLASS_KEY) ||
+      properties.containsKey(LDAP_GROUP_MEMEBERSHIP_ATTR_KEY) ||
+      properties.containsKey(LDAP_GROUP_NAMING_ATTR_KEY) ||
+      properties.containsKey(LDAP_ADMIN_GROUP_MAPPING_RULES_KEY) ||
+      properties.containsKey(LDAP_GROUP_SEARCH_FILTER_KEY)) {
       ldapServerProperties.setGroupMappingEnabled(true);
     }
 
@@ -1558,7 +1566,7 @@ public class Configuration {
   }
 
   public String getOjdbcJarName() {
-	return properties.getProperty(OJDBC_JAR_NAME_KEY, OJDBC_JAR_NAME_DEFAULT);
+    return properties.getProperty(OJDBC_JAR_NAME_KEY, OJDBC_JAR_NAME_DEFAULT);
   }
 
   public String getJavaHome() {
@@ -1574,11 +1582,11 @@ public class Configuration {
   }
 
   public String getServerDBName() {
-	return properties.getProperty(SERVER_DB_NAME_KEY, SERVER_DB_NAME_DEFAULT);
+    return properties.getProperty(SERVER_DB_NAME_KEY, SERVER_DB_NAME_DEFAULT);
   }
 
   public String getMySQLJarName() {
-	return properties.getProperty(MYSQL_JAR_NAME_KEY, MYSQL_JAR_NAME_DEFAULT);
+    return properties.getProperty(MYSQL_JAR_NAME_KEY, MYSQL_JAR_NAME_DEFAULT);
   }
 
   public JPATableGenerationStrategy getJPATableGenerationStrategy() {
@@ -1606,8 +1614,8 @@ public class Configuration {
   public File getServerKeyStoreDirectory() {
     String path = properties.getProperty(SRVR_KSTR_DIR_KEY, SRVR_KSTR_DIR_DEFAULT);
     return ((path == null) || path.isEmpty())
-        ? new File(".")
-        : new File(path);
+      ? new File(".")
+      : new File(path);
   }
 
   /**
@@ -1684,7 +1692,7 @@ public class Configuration {
 
     if(StringUtils.isEmpty(value)) {
       LOG.debug("Value of {} is not set, using default value ({})",
-          TEMPORARY_KEYSTORE_RETENTION_MINUTES, TEMPORARY_KEYSTORE_RETENTION_MINUTES_DEFAULT);
+        TEMPORARY_KEYSTORE_RETENTION_MINUTES, TEMPORARY_KEYSTORE_RETENTION_MINUTES_DEFAULT);
       minutes = TEMPORARY_KEYSTORE_RETENTION_MINUTES_DEFAULT;
     }
     else {
@@ -1693,7 +1701,7 @@ public class Configuration {
         LOG.debug("Value of {} is {}", TEMPORARY_KEYSTORE_RETENTION_MINUTES, value);
       } catch (NumberFormatException e) {
         LOG.warn("Value of {} ({}) should be a number, falling back to default value ({})",
-            TEMPORARY_KEYSTORE_RETENTION_MINUTES, value, TEMPORARY_KEYSTORE_RETENTION_MINUTES_DEFAULT);
+          TEMPORARY_KEYSTORE_RETENTION_MINUTES, value, TEMPORARY_KEYSTORE_RETENTION_MINUTES_DEFAULT);
         minutes = TEMPORARY_KEYSTORE_RETENTION_MINUTES_DEFAULT;
       }
     }
@@ -1715,7 +1723,7 @@ public class Configuration {
 
     if (StringUtils.isEmpty(value)) {
       LOG.debug("Value of {} is not set, using default value ({})",
-          TEMPORARY_KEYSTORE_ACTIVELY_PURGE, TEMPORARY_KEYSTORE_ACTIVELY_PURGE_DEFAULT);
+        TEMPORARY_KEYSTORE_ACTIVELY_PURGE, TEMPORARY_KEYSTORE_ACTIVELY_PURGE_DEFAULT);
       return TEMPORARY_KEYSTORE_ACTIVELY_PURGE_DEFAULT;
     } else if ("true".equalsIgnoreCase(value)) {
       LOG.debug("Value of {} is {}", TEMPORARY_KEYSTORE_ACTIVELY_PURGE, value);
@@ -1725,22 +1733,22 @@ public class Configuration {
       return false;
     } else {
       LOG.warn("Value of {} should be either \"true\" or \"false\" but is \"{}\", falling back to default value ({})",
-          TEMPORARY_KEYSTORE_ACTIVELY_PURGE, value, TEMPORARY_KEYSTORE_ACTIVELY_PURGE_DEFAULT);
+        TEMPORARY_KEYSTORE_ACTIVELY_PURGE, value, TEMPORARY_KEYSTORE_ACTIVELY_PURGE_DEFAULT);
       return TEMPORARY_KEYSTORE_ACTIVELY_PURGE_DEFAULT;
     }
   }
 
   public String getSrvrDisabledCiphers() {
     String disabledCiphers = properties.getProperty(SRVR_DISABLED_CIPHERS,
-                                                    properties.getProperty(SRVR_DISABLED_CIPHERS,
-                                                                           SRVR_DISABLED_CIPHERS_DEFAULT));
+      properties.getProperty(SRVR_DISABLED_CIPHERS,
+        SRVR_DISABLED_CIPHERS_DEFAULT));
     return disabledCiphers.trim();
   }
 
   public String getSrvrDisabledProtocols() {
     String disabledProtocols = properties.getProperty(SRVR_DISABLED_PROTOCOLS,
-                                                      properties.getProperty(SRVR_DISABLED_PROTOCOLS,
-                                                                             SRVR_DISABLED_PROTOCOLS_DEFAULT));
+      properties.getProperty(SRVR_DISABLED_PROTOCOLS,
+        SRVR_DISABLED_PROTOCOLS_DEFAULT));
     return disabledProtocols.trim();
   }
 
@@ -1751,7 +1759,7 @@ public class Configuration {
 
   public int getTwoWayAuthPort() {
     return Integer.parseInt(properties.getProperty(SRVR_TWO_WAY_SSL_PORT_KEY,
-                                                   String.valueOf(SRVR_TWO_WAY_SSL_PORT_DEFAULT)));
+      String.valueOf(SRVR_TWO_WAY_SSL_PORT_DEFAULT)));
   }
 
   /**
@@ -1887,10 +1895,10 @@ public class Configuration {
 
     if(osFamily.isUbuntuFamily(osType)) {
       repoSuffixes = properties.getProperty(REPO_SUFFIX_KEY_UBUNTU,
-          REPO_SUFFIX_UBUNTU);
+        REPO_SUFFIX_UBUNTU);
     } else {
       repoSuffixes = properties.getProperty(REPO_SUFFIX_KEY_DEFAULT,
-          REPO_SUFFIX_DEFAULT);
+        REPO_SUFFIX_DEFAULT);
     }
 
     return repoSuffixes.split(",");
@@ -1903,7 +1911,7 @@ public class Configuration {
 
   public String getExecutionSchedulerThreads() {
     return properties.getProperty(EXECUTION_SCHEDULER_THREADS_KEY,
-                                  DEFAULT_SCHEDULER_THREAD_COUNT);
+      DEFAULT_SCHEDULER_THREAD_COUNT);
   }
 
   public Integer getRequestReadTimeout() {
@@ -1913,7 +1921,7 @@ public class Configuration {
 
   public Integer getRequestConnectTimeout() {
     return Integer.parseInt(properties.getProperty(REQUEST_CONNECT_TIMEOUT,
-                                                   REQUEST_CONNECT_TIMEOUT_DEFAULT));
+      REQUEST_CONNECT_TIMEOUT_DEFAULT));
   }
 
   public String getExecutionSchedulerConnections() {
@@ -1930,7 +1938,7 @@ public class Configuration {
 
   public Integer getExecutionSchedulerStartDelay() {
     String delay = properties.getProperty(EXECUTION_SCHEDULER_START_DELAY_KEY,
-                                          DEFAULT_SCHEDULER_START_DELAY_SECONDS);
+      DEFAULT_SCHEDULER_START_DELAY_SECONDS);
     return Integer.parseInt(delay);
   }
 
@@ -1944,7 +1952,7 @@ public class Configuration {
         sleepTime = Long.valueOf(stringValue);
       } catch (NumberFormatException ignored) {
         LOG.warn("Value of {} ({}) should be a number, " +
-          "falling back to default value ({})", EXECUTION_SCHEDULER_WAIT_KEY,
+            "falling back to default value ({})", EXECUTION_SCHEDULER_WAIT_KEY,
           stringValue, DEFAULT_EXECUTION_SCHEDULER_WAIT_SECONDS);
       }
 
@@ -1969,13 +1977,13 @@ public class Configuration {
 
   public String getCustomActionDefinitionPath() {
     return properties.getProperty(CUSTOM_ACTION_DEFINITION_KEY,
-                                  CUSTOM_ACTION_DEFINITION_DEF_VALUE);
+      CUSTOM_ACTION_DEFINITION_DEF_VALUE);
   }
 
   public int getAgentPackageParallelCommandsLimit() {
     int value = Integer.parseInt(properties.getProperty(
-            AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_KEY,
-            AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_DEFAULT));
+      AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_KEY,
+      AGENT_PACKAGE_PARALLEL_COMMANDS_LIMIT_DEFAULT));
     if (value < 1) {
       value = 1;
     }
@@ -1996,7 +2004,7 @@ public class Configuration {
     } else {
       LOG.warn(String.format("Value of %s (%s) should be a number, " +
           "falling back to default value (%s)",
-          key, value, defaultValue));
+        key, value, defaultValue));
       return defaultValue;
     }
   }
@@ -2010,7 +2018,7 @@ public class Configuration {
       return Integer.parseInt(value);
     } else {
       LOG.warn("Value of {} ({}) should be a number, falling back to default value ({})",
-          SERVER_TASK_TIMEOUT_KEY, value, SERVER_TASK_TIMEOUT_DEFAULT);
+        SERVER_TASK_TIMEOUT_KEY, value, SERVER_TASK_TIMEOUT_DEFAULT);
       return Integer.parseInt(SERVER_TASK_TIMEOUT_DEFAULT);
     }
   }
@@ -2020,7 +2028,7 @@ public class Configuration {
   }
 
   public String getSharedResourcesDirPath(){
-      return properties.getProperty(SHARED_RESOURCES_DIR_KEY, SHARED_RESOURCES_DIR_DEFAULT);
+    return properties.getProperty(SHARED_RESOURCES_DIR_KEY, SHARED_RESOURCES_DIR_DEFAULT);
   }
 
   public String getServerJDBCPostgresSchemaName() {
@@ -2032,7 +2040,7 @@ public class Configuration {
    */
   public int getClientThreadPoolSize() {
     return Integer.parseInt(properties.getProperty(
-        CLIENT_THREADPOOL_SIZE_KEY, String.valueOf(CLIENT_THREADPOOL_SIZE_DEFAULT)));
+      CLIENT_THREADPOOL_SIZE_KEY, String.valueOf(CLIENT_THREADPOOL_SIZE_DEFAULT)));
   }
 
   /**
@@ -2040,7 +2048,7 @@ public class Configuration {
    */
   public int getAgentThreadPoolSize() {
     return Integer.parseInt(properties.getProperty(
-        AGENT_THREADPOOL_SIZE_KEY, String.valueOf(AGENT_THREADPOOL_SIZE_DEFAULT)));
+      AGENT_THREADPOOL_SIZE_KEY, String.valueOf(AGENT_THREADPOOL_SIZE_DEFAULT)));
   }
 
   /**
@@ -2050,7 +2058,7 @@ public class Configuration {
    */
   public int getViewExtractionThreadPoolMaxSize() {
     return Integer.parseInt(properties.getProperty(
-        VIEW_EXTRACTION_THREADPOOL_MAX_SIZE_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_MAX_SIZE_DEFAULT)));
+      VIEW_EXTRACTION_THREADPOOL_MAX_SIZE_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_MAX_SIZE_DEFAULT)));
   }
 
   /**
@@ -2060,7 +2068,7 @@ public class Configuration {
    */
   public int getViewExtractionThreadPoolCoreSize() {
     return Integer.parseInt(properties.getProperty(
-        VIEW_EXTRACTION_THREADPOOL_CORE_SIZE_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_CORE_SIZE_DEFAULT)));
+      VIEW_EXTRACTION_THREADPOOL_CORE_SIZE_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_CORE_SIZE_DEFAULT)));
   }
 
   /**
@@ -2070,7 +2078,7 @@ public class Configuration {
    */
   public int getPropertyProvidersThreadPoolCoreSize() {
     return Integer.parseInt(properties.getProperty(PROPERTY_PROVIDER_THREADPOOL_CORE_SIZE_KEY,
-        String.valueOf(PROPERTY_PROVIDER_THREADPOOL_CORE_SIZE_DEFAULT)));
+      String.valueOf(PROPERTY_PROVIDER_THREADPOOL_CORE_SIZE_DEFAULT)));
   }
 
   /**
@@ -2080,7 +2088,7 @@ public class Configuration {
    */
   public int getPropertyProvidersThreadPoolMaxSize() {
     return Integer.parseInt(properties.getProperty(PROPERTY_PROVIDER_THREADPOOL_MAX_SIZE_KEY,
-        String.valueOf(PROPERTY_PROVIDER_THREADPOOL_MAX_SIZE_DEFAULT)));
+      String.valueOf(PROPERTY_PROVIDER_THREADPOOL_MAX_SIZE_DEFAULT)));
   }
 
   /**
@@ -2090,7 +2098,7 @@ public class Configuration {
    */
   public long getViewExtractionThreadPoolTimeout() {
     return Long.parseLong(properties.getProperty(
-        VIEW_EXTRACTION_THREADPOOL_TIMEOUT_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_TIMEOUT_DEFAULT)));
+      VIEW_EXTRACTION_THREADPOOL_TIMEOUT_KEY, String.valueOf(VIEW_EXTRACTION_THREADPOOL_TIMEOUT_DEFAULT)));
   }
 
   /**
@@ -2251,8 +2259,8 @@ public class Configuration {
       databaseType = DatabaseType.SQL_ANYWHERE;
     } else {
       throw new RuntimeException(
-          "The database type could be not determined from the JDBC URL "
-              + dbUrl);
+        "The database type could be not determined from the JDBC URL "
+          + dbUrl);
     }
 
     return databaseType;
@@ -2289,7 +2297,7 @@ public class Configuration {
    */
   public ConnectionPoolType getConnectionPoolType(){
     String connectionPoolType = properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL, ConnectionPoolType.INTERNAL.getName());
+      SERVER_JDBC_CONNECTION_POOL, ConnectionPoolType.INTERNAL.getName());
 
     if (connectionPoolType.equals(ConnectionPoolType.C3P0.getName())) {
       return ConnectionPoolType.C3P0;
@@ -2306,7 +2314,7 @@ public class Configuration {
    */
   public int getConnectionPoolMinimumSize() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_MIN_SIZE, DEFAULT_JDBC_POOL_MIN_CONNECTIONS));
+      SERVER_JDBC_CONNECTION_POOL_MIN_SIZE, DEFAULT_JDBC_POOL_MIN_CONNECTIONS));
   }
 
   /**
@@ -2317,7 +2325,7 @@ public class Configuration {
    */
   public int getConnectionPoolMaximumSize() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_MAX_SIZE, DEFAULT_JDBC_POOL_MAX_CONNECTIONS));
+      SERVER_JDBC_CONNECTION_POOL_MAX_SIZE, DEFAULT_JDBC_POOL_MAX_CONNECTIONS));
   }
 
   /**
@@ -2329,7 +2337,7 @@ public class Configuration {
    */
   public int getConnectionPoolMaximumAge() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_MAX_AGE, DEFAULT_JDBC_POOL_MAX_AGE_SECONDS));
+      SERVER_JDBC_CONNECTION_POOL_MAX_AGE, DEFAULT_JDBC_POOL_MAX_AGE_SECONDS));
   }
 
   /**
@@ -2341,8 +2349,8 @@ public class Configuration {
    */
   public int getConnectionPoolMaximumIdle() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME,
-        DEFAULT_JDBC_POOL_MAX_IDLE_TIME_SECONDS));
+      SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME,
+      DEFAULT_JDBC_POOL_MAX_IDLE_TIME_SECONDS));
   }
 
   /**
@@ -2354,8 +2362,8 @@ public class Configuration {
    */
   public int getConnectionPoolMaximumExcessIdle() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS,
-        DEFAULT_JDBC_POOL_EXCESS_MAX_IDLE_TIME_SECONDS));
+      SERVER_JDBC_CONNECTION_POOL_MAX_IDLE_TIME_EXCESS,
+      DEFAULT_JDBC_POOL_EXCESS_MAX_IDLE_TIME_SECONDS));
   }
 
   /**
@@ -2367,8 +2375,8 @@ public class Configuration {
    */
   public int getConnectionPoolAcquisitionSize() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_AQUISITION_SIZE,
-        DEFAULT_JDBC_POOL_ACQUISITION_SIZE));
+      SERVER_JDBC_CONNECTION_POOL_AQUISITION_SIZE,
+      DEFAULT_JDBC_POOL_ACQUISITION_SIZE));
   }
 
   /**
@@ -2379,8 +2387,8 @@ public class Configuration {
    */
   public int getConnectionPoolAcquisitionRetryAttempts() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_ATTEMPTS,
-        DEFAULT_JDBC_POOL_ACQUISITION_RETRY_ATTEMPTS));
+      SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_ATTEMPTS,
+      DEFAULT_JDBC_POOL_ACQUISITION_RETRY_ATTEMPTS));
   }
 
   /**
@@ -2390,8 +2398,8 @@ public class Configuration {
    */
   public int getConnectionPoolAcquisitionRetryDelay() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_DELAY,
-        DEFAULT_JDBC_POOL_ACQUISITION_RETRY_DELAY));
+      SERVER_JDBC_CONNECTION_POOL_ACQUISITION_RETRY_DELAY,
+      DEFAULT_JDBC_POOL_ACQUISITION_RETRY_DELAY));
   }
 
 
@@ -2403,8 +2411,8 @@ public class Configuration {
    */
   public int getConnectionPoolIdleTestInternval() {
     return Integer.parseInt(properties.getProperty(
-        SERVER_JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL,
-        DEFAULT_JDBC_POOL_IDLE_TEST_INTERVAL));
+      SERVER_JDBC_CONNECTION_POOL_IDLE_TEST_INTERVAL,
+      DEFAULT_JDBC_POOL_IDLE_TEST_INTERVAL));
   }
 
   /**
@@ -2446,7 +2454,7 @@ public class Configuration {
    */
   public int getMetricCacheIdleSeconds() {
     return Integer.parseInt(properties.getProperty(TIMELINE_METRICS_CACHE_IDLE_TIME,
-        DEFAULT_TIMELINE_METRICS_CACHE_IDLE_TIME));
+      DEFAULT_TIMELINE_METRICS_CACHE_IDLE_TIME));
   }
 
   /**
@@ -2577,7 +2585,7 @@ public class Configuration {
   @Experimental(feature = ExperimentalFeature.PARALLEL_PROCESSING)
   public boolean isExperimentalConcurrentStageProcessingEnabled() {
     return Boolean.parseBoolean(properties.getProperty(
-        EXPERIMENTAL_CONCURRENCY_STAGE_PROCESSING_ENABLED, Boolean.FALSE.toString()));
+      EXPERIMENTAL_CONCURRENCY_STAGE_PROCESSING_ENABLED, Boolean.FALSE.toString()));
   }
 
   /**
@@ -2593,7 +2601,7 @@ public class Configuration {
   @Experimental(feature = ExperimentalFeature.ALERT_CACHING)
   public boolean isAlertCacheEnabled() {
     return Boolean.parseBoolean(
-        properties.getProperty(ALERTS_CACHE_ENABLED, Boolean.FALSE.toString()));
+      properties.getProperty(ALERTS_CACHE_ENABLED, Boolean.FALSE.toString()));
   }
 
   /**
@@ -2606,7 +2614,7 @@ public class Configuration {
   @Experimental(feature = ExperimentalFeature.ALERT_CACHING)
   public int getAlertCacheFlushInterval() {
     return Integer.parseInt(
-        properties.getProperty(ALERTS_CACHE_FLUSH_INTERVAL, ALERTS_CACHE_FLUSH_INTERVAL_DEFAULT));
+      properties.getProperty(ALERTS_CACHE_FLUSH_INTERVAL, ALERTS_CACHE_FLUSH_INTERVAL_DEFAULT));
   }
 
   /**
@@ -2637,11 +2645,11 @@ public class Configuration {
     Integer attempts = Integer.valueOf(property);
     if (attempts < 0) {
       LOG.warn("Invalid operations retry attempts number ({}), should be [0,{}]. Value reset to default {}",
-          attempts, RETRY_ATTEMPTS_LIMIT, OPERATIONS_RETRY_ATTEMPTS_DEFAULT);
+        attempts, RETRY_ATTEMPTS_LIMIT, OPERATIONS_RETRY_ATTEMPTS_DEFAULT);
       attempts = Integer.valueOf(OPERATIONS_RETRY_ATTEMPTS_DEFAULT);
     } else if (attempts > RETRY_ATTEMPTS_LIMIT) {
       LOG.warn("Invalid operations retry attempts number ({}), should be [0,{}]. Value set to {}",
-          attempts, RETRY_ATTEMPTS_LIMIT, RETRY_ATTEMPTS_LIMIT);
+        attempts, RETRY_ATTEMPTS_LIMIT, RETRY_ATTEMPTS_LIMIT);
       attempts = RETRY_ATTEMPTS_LIMIT;
     }
     if (attempts > 0) {
@@ -2655,15 +2663,23 @@ public class Configuration {
    */
   public int getVersionDefinitionConnectTimeout() {
     return NumberUtils.toInt(
-        properties.getProperty(VERSION_DEFINITION_CONNECT_TIMEOUT),
-            VERSION_DEFINITION_CONNECT_TIMEOUT_DEFAULT);
+      properties.getProperty(VERSION_DEFINITION_CONNECT_TIMEOUT),
+      VERSION_DEFINITION_CONNECT_TIMEOUT_DEFAULT);
   }
   /**
    * @return the read timeout used when loading a version definition URL
    */
   public int getVersionDefinitionReadTimeout() {
     return NumberUtils.toInt(
-        properties.getProperty(VERSION_DEFINITION_READ_TIMEOUT),
-            VERSION_DEFINITION_READ_TIMEOUT_DEFAULT);
+      properties.getProperty(VERSION_DEFINITION_READ_TIMEOUT),
+      VERSION_DEFINITION_READ_TIMEOUT_DEFAULT);
+  }
+
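+  /**
+   * @return the number of times the agent should retry package installation
+   * when the stack repository is unavailable, as the raw String property value
+   */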
+  public String getAgentStackRetryOnInstallCount(){
+    return properties.getProperty(AGENT_STACK_RETRY_COUNT_KEY, AGENT_STACK_RETRY_COUNT_DEFAULT);
+  }
+
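+  /**
+   * @return whether the agent should retry package installation when the
+   * stack repository is unavailable, as the raw String property value
+   */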
+  public String isAgentStackRetryOnInstallEnabled(){
+    return properties.getProperty(AGENT_STACK_RETRY_ON_REPO_UNAVAILABILITY_KEY, AGENT_STACK_RETRY_ON_REPO_UNAVAILABILITY_DEFAULT);
   }
 }
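
A note on the pattern: every getter reindented above does the same thing: look
up a string property by key, fall back to a compile-time String default, then
parse it into the target type. A minimal Python sketch of that pattern (the
key name and default below are assumptions for illustration, not Ambari's
actual property names):

    # getProperty(key, default) followed by Integer.parseInt, sketched in
    # Python; key name and default value are illustrative assumptions.
    AGENT_STACK_RETRY_COUNT_KEY = "agent.stack.retry.count"
    AGENT_STACK_RETRY_COUNT_DEFAULT = "5"

    def get_int_property(properties, key, default):
        # look up the raw string, fall back to the default, then parse
        return int(properties.get(key, default))

    props = {}  # empty, so the default applies
    assert get_int_property(props, AGENT_STACK_RETRY_COUNT_KEY,
                            AGENT_STACK_RETRY_COUNT_DEFAULT) == 5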

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index 88180c0..7a7bc21 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.controller;
 
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_ON_UNAVAILABILITY;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_COUNT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMPONENT_CATEGORY;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
@@ -58,7 +60,6 @@ import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.state.StackId;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
 import org.apache.ambari.server.utils.SecretReference;
-import org.apache.ambari.server.utils.StageUtils;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -414,7 +415,10 @@ public class AmbariActionExecutionHelper {
       execCmd.setComponentName(componentName == null || componentName.isEmpty() ?
         resourceFilter.getComponentName() : componentName);
 
-      addRepoInfoToHostLevelParams(cluster, execCmd.getHostLevelParams(), hostName);
+      Map<String, String> hostLevelParams = execCmd.getHostLevelParams();
+      hostLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled());
+      hostLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
+      addRepoInfoToHostLevelParams(cluster, hostLevelParams, hostName);
 
       Map<String, String> roleParams = execCmd.getRoleParams();
       if (roleParams == null) {

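With this change the execution helper stamps the retry policy onto every
command's hostLevelParams, so agent-side scripts can read it straight from the
command they receive. A hedged sketch of the resulting fragment (the key names
are taken from the diff; the values are made up):

    # Illustrative hostLevelParams fragment as a command script would see it.
    host_level_params = {
        "agent_stack_retry_on_unavailability": "true",
        "agent_stack_retry_count": "5",
    }
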
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 24728bf..a94c6b4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -18,6 +18,8 @@
 
 package org.apache.ambari.server.controller;
 
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_ON_UNAVAILABILITY;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.AGENT_STACK_RETRY_COUNT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CLIENTS_TO_UPDATE_CONFIGS;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMPONENT_CATEGORY;
@@ -1149,6 +1151,8 @@ public class AmbariCustomCommandExecutionHelper {
     hostLevelParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
     hostLevelParams.putAll(managementController.getRcaParameters());
     hostLevelParams.put(HOST_SYS_PREPPED, configs.areHostsSysPrepped());
+    hostLevelParams.put(AGENT_STACK_RETRY_ON_UNAVAILABILITY, configs.isAgentStackRetryOnInstallEnabled());
+    hostLevelParams.put(AGENT_STACK_RETRY_COUNT, configs.getAgentStackRetryOnInstallCount());
     ClusterVersionEntity clusterVersionEntity = clusterVersionDAO.findByClusterAndStateCurrent(cluster.getClusterName());
     if (clusterVersionEntity == null) {
       List<ClusterVersionEntity> clusterVersionEntityList = clusterVersionDAO

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
index 1240e7c..ee19ee4 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase.py
@@ -191,7 +191,9 @@ def hbase(name=None):
     params.HdfsResource(None, action="execute")
 
   if params.phoenix_enabled:
-    Package(params.phoenix_package)
+    Package(params.phoenix_package,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
 
 def hbase_TemplateConfig(name, tag=None):
   import params

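Every Package call site touched by this commit gains the same two keyword
arguments. A minimal sketch of the pattern, assuming the usual
resource_management import and a params module that exposes the two values:

    # Retry-aware Package pattern used across these scripts; the package
    # name here is illustrative.
    from resource_management.core.resources.packaging import Package
    import params

    Package("phoenix",
            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
            retry_count=params.agent_stack_retry_count)
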
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 03486a7..f9694c6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -24,6 +24,7 @@ from functions import calc_xmn_from_xms, ensure_unit_for_memory
 
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 from ambari_commons.os_check import OSCheck
+from ambari_commons.str_utils import cbool, cint
 
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
@@ -42,6 +43,9 @@ exec_tmp_dir = Script.get_tmp_dir()
 sudo = AMBARI_SUDO_BINARY
 
 stack_name = default("/hostLevelParams/stack_name", None)
+agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
+agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
+
 version = default("/commandParams/version", None)
 component_directory = status_params.component_directory
 etc_prefix_dir = "/etc/hbase"

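cbool and cint from ambari_commons.str_utils normalize the string-typed
hostLevelParams into real Python types before they reach Package. A rough,
hypothetical reimplementation of the expected semantics (the real helpers may
handle more edge cases):

    # Hypothetical stand-ins for ambari_commons.str_utils.cbool / cint.
    def cbool(value):
        # hostLevelParams values arrive as strings such as "true" / "false"
        return str(value).strip().lower() in ("true", "yes", "1")

    def cint(value):
        return None if value is None else int(str(value).strip())

    assert cbool("true") and not cbool("false")
    assert cint("5") == 5
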
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
index 246bf07..1264284 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs.py
@@ -117,7 +117,9 @@ def hdfs(name=None):
   )
   
   if params.lzo_enabled and len(params.lzo_packages) > 0:
-      Package(params.lzo_packages)
+      Package(params.lzo_packages,
+              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+              retry_count=params.agent_stack_retry_count)
       
 def install_snappy():
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 3fb4486..5242694 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -24,6 +24,7 @@ import os
 import re
 
 from ambari_commons.os_check import OSCheck
+from ambari_commons.str_utils import cbool, cint
 
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import hdp_select
@@ -47,6 +48,8 @@ stack_name = default("/hostLevelParams/stack_name", None)
 upgrade_direction = default("/commandParams/upgrade_direction", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
+agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
 version = default("/commandParams/version", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index caaa9f3..2531598 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -26,6 +26,7 @@ from urlparse import urlparse
 
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 from ambari_commons.os_check import OSCheck
+from ambari_commons.str_utils import cbool, cint
 
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.default import default
@@ -44,6 +45,8 @@ tmp_dir = Script.get_tmp_dir()
 sudo = AMBARI_SUDO_BINARY
 
 stack_name = default("/hostLevelParams/stack_name", None)
+agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
+agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
 # node hostname
 hostname = config["hostname"]

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_atlas_hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_atlas_hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_atlas_hive.py
index c92e3db..e78190f 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_atlas_hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_atlas_hive.py
@@ -32,7 +32,7 @@ def setup_atlas_hive(configuration_directory=None):
 
     if not params.host_sys_prepped:
       Package(params.atlas_ubuntu_plugin_package if OSCheck.is_ubuntu_family() else params.atlas_plugin_package, # FIXME HACK: install the package during RESTART/START when install_packages is not triggered.
-      )
+              retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability, retry_count=params.agent_stack_retry_count)
 
     PropertiesFile(format('{configuration_directory}/client.properties'),
                    properties = params.atlas_client_props,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 2dd362a..df9ecfe 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -305,7 +305,9 @@ def oozie_server_specific():
       not_if  = no_op_test)
 
   if params.lzo_enabled and len(params.all_lzo_packages) > 0:
-    Package(params.all_lzo_packages)
+    Package(params.all_lzo_packages,
+            retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+            retry_count=params.agent_stack_retry_count)
     Execute(format('{sudo} cp {hadoop_lib_home}/hadoop-lzo*.jar {oozie_lib_dir}'),
       not_if  = no_op_test,
     )

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index 81c894a..072b127 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -19,6 +19,7 @@ limitations under the License.
 """
 from resource_management import *
 from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.str_utils import cbool, cint
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import hdp_select
@@ -46,6 +47,8 @@ hostname = config["hostname"]
 version = default("/commandParams/version", None)
 stack_name = default("/hostLevelParams/stack_name", None)
 upgrade_direction = default("/commandParams/upgrade_direction", None)
+agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
+agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
index bf80616..6f8ef9f 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
@@ -31,12 +31,14 @@ from resource_management import *
 import resource_management
 from resource_management.libraries.functions.list_ambari_managed_repos import list_ambari_managed_repos
 from ambari_commons.os_check import OSCheck, OSConst
+from ambari_commons.str_utils import cbool, cint
 from resource_management.libraries.functions.packages_analyzer import allInstalledPackages
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions.hdp_select import get_hdp_versions
 from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
 from resource_management.libraries.functions.repo_version_history \
   import read_actual_version_from_history_file, write_actual_version_to_history_file, REPO_VERSION_HISTORY_FILE
+from resource_management.core.resources.system import Execute
 
 from resource_management.core.logger import Logger
 
@@ -52,7 +54,7 @@ class InstallPackages(Script):
   UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
   REPO_FILE_NAME_PREFIX = 'HDP-'
   STACK_TO_ROOT_FOLDER = {"HDP": "/usr/hdp"}
-  
+
   def actionexecute(self, env):
     num_errors = 0
 
@@ -332,11 +334,18 @@ class InstallPackages(Script):
     :return: Returns 0 if no errors were found, and 1 otherwise.
     """
     ret_code = 0
+    
+    config = self.get_config()
+    agent_stack_retry_on_unavailability = cbool(config['hostLevelParams']['agent_stack_retry_on_unavailability'])
+    agent_stack_retry_count = cint(config['hostLevelParams']['agent_stack_retry_count'])
+
     # Install packages
     packages_were_checked = False
     try:
       Package("hdp-select", 
               action="upgrade",
+              retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
+              retry_count=agent_stack_retry_count
       )
       
       packages_installed_before = []
@@ -347,7 +356,9 @@ class InstallPackages(Script):
       for package in filtered_package_list:
         name = self.format_package_name(package['name'])
         Package(name,
-          action="upgrade" # this enables upgrading non-versioned packages, despite the fact they exist. Needed by 'mahout' which is non-version but have to be updated     
+          action="upgrade", # this enables upgrading non-versioned packages despite the fact that they already exist; needed by 'mahout', which is non-versioned but has to be updated
+          retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
+          retry_count=agent_stack_retry_count
         )
     except Exception, err:
       ret_code = 1

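The two new arguments presumably translate into a bounded retry loop inside
the Package provider when the repository is unreachable. A simplified,
hypothetical sketch of that behavior (not the actual provider code):

    import time

    def install_with_retry(install_fn, retry_enabled, retry_count, delay=30):
        # Hypothetical: retry a failing repo operation up to retry_count
        # times when retry_enabled is set; otherwise fail on the first error.
        attempts = retry_count if retry_enabled else 1
        for attempt in range(1, attempts + 1):
            try:
                return install_fn()
            except Exception:
                if attempt == attempts:
                    raise
                time.sleep(delay)
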
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
index a687ea7..9fff7507 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/params.py
@@ -17,6 +17,7 @@ limitations under the License.
 
 """
 
+from ambari_commons.str_utils import cbool, cint
 from resource_management import *
 from resource_management.core.system import System
 import json
@@ -28,6 +29,9 @@ tmp_dir = Script.get_tmp_dir()
 #RPM versioning support
 rpm_version = default("/configurations/cluster-env/rpm_version", None)
 
+agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
+agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
+
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']
 smoke_user =  config['configurations']['cluster-env']['smokeuser']

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
index 03afc44..54fb8a3 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -60,4 +60,7 @@ def install_packages():
   packages = ['unzip', 'curl']
   if params.rpm_version:
     packages.append('bigtop-select')
-  Package(packages)
+  Package(packages,
+          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+          retry_count=params.agent_stack_retry_count)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
index cf20df4..226cb0f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
@@ -18,6 +18,7 @@ limitations under the License.
 """
 
 from ambari_commons.constants import AMBARI_SUDO_BINARY
+from ambari_commons.str_utils import cbool, cint
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from resource_management.core.system import System
 from resource_management.libraries.script.script import Script
@@ -28,6 +29,9 @@ tmp_dir = Script.get_tmp_dir()
 sudo = AMBARI_SUDO_BINARY
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
+agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
+
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
 #users and groups


[50/51] [abbrv] ambari git commit: AMBARI-15137: Parameterize distro-specific stack information for TEZ (Juanjo Marron via dili)

Posted by jl...@apache.org.
AMBARI-15137: Parameterize distro-specific stack information for TEZ (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/b68758b7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/b68758b7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/b68758b7

Branch: refs/heads/AMBARI-13364
Commit: b68758b7b95b6228ca21cec00c8c72d9b3d72217
Parents: fa8b4c4
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:34:46 2016 -0500
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 15:40:35 2016 -0800

----------------------------------------------------------------------
 .../TEZ/0.4.0.2.1/package/scripts/params_linux.py   | 16 ++++++++++------
 .../TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py    |  2 +-
 .../TEZ/0.4.0.2.1/package/scripts/service_check.py  |  2 +-
 .../TEZ/0.4.0.2.1/package/scripts/tez_client.py     |  5 +++--
 .../stacks/HDP/2.0.6/configuration/cluster-env.xml  |  5 +++++
 5 files changed, 20 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/b68758b7/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
index cc87973..439f862 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
@@ -32,10 +32,13 @@ config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
 stack_name = default("/hostLevelParams/stack_name", None)
+stack_dir = config['configurations']['cluster-env']['stack_dir']
 
 # This is expected to be of the form #.#.#.#
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+stack_version_tez_symlink_support = config['configurations']['cluster-env']['stack_version_tez_symlink_support']
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
@@ -48,15 +51,16 @@ tez_etc_dir = "/etc/tez"
 config_dir = "/etc/tez/conf"
 tez_examples_jar = "/usr/lib/tez/tez-mapreduce-examples*.jar"
 
-# hadoop parameters for 2.2+
-if Script.is_stack_greater_or_equal("2.2"):
-  tez_examples_jar = "/usr/hdp/current/tez-client/tez-examples*.jar"
+# hadoop parameters for stack_version_ru_support+
+if Script.is_stack_greater_or_equal(stack_version_ru_support):
+  tez_examples_jar = format("{stack_dir}/current/tez-client/tez-examples*.jar")
 
-# tez only started linking /usr/hdp/x.x.x.x/tez-client/conf in HDP 2.3+
-if Script.is_stack_greater_or_equal("2.3"):
+# tez only started linking <stack_dir>/x.x.x.x/tez-client/conf in stack_version_tez_symlink_support+
+if Script.is_stack_greater_or_equal(stack_version_tez_symlink_support):
   # !!! use realpath for now since the symlink exists but is broken and a
   # broken symlink messes with the DirectoryProvider class
-  config_dir = os.path.realpath("/usr/hdp/current/tez-client/conf")
+  config_path = os.path.join(stack_dir, "current/tez-client/conf")
+  config_dir = os.path.realpath(config_path)
 
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 security_enabled = config['configurations']['cluster-env']['security_enabled']

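One subtlety in params files like this: resource_management's format()
resolves {name} placeholders from the caller's scope, so stack-relative paths
must pass through it; a bare string literal would keep the text "{stack_dir}"
verbatim. A quick illustration using Python's built-in str.format as a
stand-in (the stack_dir value is illustrative):

    # Stand-in for resource_management's format(), which resolves {names}
    # from the caller's scope; the stack_dir value here is illustrative.
    stack_dir = "/usr/hdp"
    tez_examples_jar = "{stack_dir}/current/tez-client/tez-examples*.jar".format(
        stack_dir=stack_dir)
    assert tez_examples_jar == "/usr/hdp/current/tez-client/tez-examples*.jar"
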
http://git-wip-us.apache.org/repos/asf/ambari/blob/b68758b7/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
index 1faedf9..68df679 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
@@ -38,7 +38,7 @@ class TezPreUpgrade(Script):
 
     Logger.info("Before starting Stack Upgrade, check if tez tarball has been copied to HDFS.")
 
-    if params.stack_version_formatted and compare_versions(params.stack_version_formatted, '2.2.0.0') >= 0:
+    if params.stack_version_formatted and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0:
       Logger.info("Stack version {0} is sufficient to check if need to copy tez.tar.gz to HDFS.".format(params.stack_version_formatted))
 
       # Force it to copy the current version of the tez tarball, rather than the version the RU will go to.

http://git-wip-us.apache.org/repos/asf/ambari/blob/b68758b7/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
index c0c66af..e581d8b 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
@@ -67,7 +67,7 @@ class TezServiceCheckLinux(TezServiceCheck):
       source = format("{tmp_dir}/sample-tez-test"),
     )
 
-    if params.stack_version_formatted and compare_versions(params.stack_version_formatted, '2.2.0.0') >= 0:
+    if params.stack_version_formatted and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0:
       copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
 
     params.HdfsResource(None, action = "execute")

http://git-wip-us.apache.org/repos/asf/ambari/blob/b68758b7/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
index e770d9b..dcd194d 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
@@ -49,13 +49,14 @@ class TezClient(Script):
 class TezClientLinux(TezClient):
 
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-client"}
+    import params
+    return {params.stack_name: "hadoop-client"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "tez", params.version)
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)

http://git-wip-us.apache.org/repos/asf/ambari/blob/b68758b7/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 75b499f..3263457 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -72,6 +72,11 @@
         <description>Stack version from which oozie-server special case in the PRE-UPGRADE phase is supported</description>
     </property>
     <property>
+        <name>stack_version_tez_symlink_support</name>
+        <value>2.3.0.0</value>
+        <description>Stack version from which the tez-client configuration symlink is supported</description>
+    </property>
+    <property>
         <name>security_enabled</name>
         <value>false</value>
         <description>Hadoop Security</description>

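Service scripts pick the new threshold up through the standard config
dictionary, exactly as the TEZ params file above does. A self-contained sketch
with an inlined config shape (values illustrative):

    # Config shape follows the usual Script.get_config() layout.
    config = {"configurations": {"cluster-env": {
        "stack_version_tez_symlink_support": "2.3.0.0"}}}
    threshold = config["configurations"]["cluster-env"]["stack_version_tez_symlink_support"]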

[32/51] [abbrv] ambari git commit: AMBARI-15286. Alerts are not triggered when ambari agent is down (aonishuk)

Posted by jl...@apache.org.
AMBARI-15286. Alerts are not triggered when ambari agent is down (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/dddffd5c
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/dddffd5c
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/dddffd5c

Branch: refs/heads/AMBARI-13364
Commit: dddffd5c6a54e1fb08bde98dd874edc68add4198
Parents: 664ccd1
Author: Andrew Onishuk <ao...@hortonworks.com>
Authored: Wed Mar 9 16:23:55 2016 +0200
Committer: Andrew Onishuk <ao...@hortonworks.com>
Committed: Wed Mar 9 16:23:55 2016 +0200

----------------------------------------------------------------------
 ambari-agent/etc/init/ambari-agent.conf | 6 ------
 1 file changed, 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/dddffd5c/ambari-agent/etc/init/ambari-agent.conf
----------------------------------------------------------------------
diff --git a/ambari-agent/etc/init/ambari-agent.conf b/ambari-agent/etc/init/ambari-agent.conf
index 021eb3b..75c1b06 100644
--- a/ambari-agent/etc/init/ambari-agent.conf
+++ b/ambari-agent/etc/init/ambari-agent.conf
@@ -17,8 +17,6 @@ description     "ambari agent"
 
 stop on runlevel [06]
 
-env PIDFILE=/var/run/ambari-agent/ambari-agent.pid
-
 kill signal SIGKILL
 respawn
 
@@ -27,8 +25,4 @@ script
 
    export AMBARI_AGENT_RUN_IN_FOREGROUND=true
    exec /etc/init.d/ambari-agent start
-end script
-
-post-stop script
-  rm -f $PIDFILE
 end script
\ No newline at end of file


[48/51] [abbrv] ambari git commit: AMBARI-14472: Parameterize distro-specific stack information for OOZIE (Juanjo Marron via dili)

Posted by jl...@apache.org.
AMBARI-14472: Parameterize distro-specific stack information for OOZIE (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/6900539d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/6900539d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/6900539d

Branch: refs/heads/AMBARI-13364
Commit: 6900539d1a9f875789a2085805fa86d630b1c159
Parents: 897e6ab
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:30:41 2016 -0500
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 15:31:58 2016 -0800

----------------------------------------------------------------------
 .../package/alerts/alert_check_oozie_server.py  | 10 ++++-
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py    |  7 ++--
 .../4.0.0.2.0/package/scripts/oozie_client.py   |  7 ++--
 .../4.0.0.2.0/package/scripts/oozie_server.py   | 19 +++++----
 .../package/scripts/oozie_server_upgrade.py     | 32 +++++++--------
 .../4.0.0.2.0/package/scripts/params_linux.py   | 43 +++++++++++---------
 .../4.0.0.2.0/package/scripts/service_check.py  |  1 -
 .../4.0.0.2.0/package/scripts/status_params.py  |  9 ++--
 .../HDP/2.0.6/configuration/cluster-env.xml     | 15 +++++++
 9 files changed, 87 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
index 90851c8..83db163 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/alerts/alert_check_oozie_server.py
@@ -26,10 +26,15 @@ from resource_management.core.resources import Execute
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import get_klist_path
+from resource_management.libraries.script.script import Script
 from ambari_commons.os_check import OSConst, OSCheck
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from urlparse import urlparse
 
+
+# server configurations
+config = Script.get_config()
+
 RESULT_CODE_OK = 'OK'
 RESULT_CODE_CRITICAL = 'CRITICAL'
 RESULT_CODE_UNKNOWN = 'UNKNOWN'
@@ -45,7 +50,8 @@ KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}
 OOZIE_URL_KEY = '{{oozie-site/oozie.base.url}}'
 SECURITY_ENABLED = '{{cluster-env/security_enabled}}'
 OOZIE_USER = '{{oozie-env/oozie_user}}'
-OOZIE_CONF_DIR = '/usr/hdp/current/oozie-server/conf'
+stack_dir = config['configurations']['cluster-env']['stack_dir']
+OOZIE_CONF_DIR = format('{stack_dir}/current/oozie-server/conf')
 OOZIE_CONF_DIR_LEGACY = '/etc/oozie/conf'
 OOZIE_HTTPS_PORT = '{{oozie-site/oozie.https.port}}'
 OOZIE_ENV_CONTENT = '{{oozie-env/content}}'
@@ -152,7 +158,7 @@ def get_check_command(oozie_url, host_name, configurations, parameters, only_kin
     finally:
       kinit_lock.release()
 
-  # oozie configuration directory uses a symlink when > HDP 2.2
+  # oozie configuration directory uses a symlink when stack_version > stack_version_ru_support
   oozie_config_directory = OOZIE_CONF_DIR_LEGACY
   if os.path.exists(OOZIE_CONF_DIR):
     oozie_config_directory = OOZIE_CONF_DIR

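The alert prefers the stack-versioned conf directory and only falls back to
the legacy path when the symlink is absent. A condensed sketch of that
selection (the stack path is illustrative):

    import os

    OOZIE_CONF_DIR = "/usr/hdp/current/oozie-server/conf"  # illustrative
    OOZIE_CONF_DIR_LEGACY = "/etc/oozie/conf"

    def pick_oozie_conf_dir():
        # Prefer the stack symlink when it exists, else the legacy location.
        if os.path.exists(OOZIE_CONF_DIR):
            return OOZIE_CONF_DIR
        return OOZIE_CONF_DIR_LEGACY
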
http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index 81a227e..2712f12 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -146,7 +146,7 @@ def oozie(is_server=False):
       owner=params.oozie_user
     )
 
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0:
     File(format("{params.conf_dir}/adminusers.txt"),
       mode=0644,
       group=params.user_group,
@@ -200,7 +200,7 @@ def oozie_ownership():
 def prepare_war():
   """
   Attempt to call prepare-war command if the marker file doesn't exist or its content doesn't equal the expected command.
-  The marker file is stored in /usr/hdp/current/oozie-server/.prepare_war_cmd
+  The marker file is stored in <stack_dir>/current/oozie-server/.prepare_war_cmd
   """
   import params
 
@@ -318,7 +318,8 @@ def oozie_server_specific():
        mode = 0644,
   )
 
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
+
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0:
     # Create hive-site and tez-site configs for oozie
     Directory(params.hive_conf_dir,
         create_parents = True,

http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
index 4fc50d2..092c101 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
@@ -30,7 +30,8 @@ from oozie_service import oozie_service
 class OozieClient(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "oozie-client"}
+    import params
+    return {params.stack_name: "oozie-client"}
 
   def install(self, env):
     self.install_packages(env)
@@ -52,8 +53,8 @@ class OozieClient(Script):
     env.set_params(params)
 
     # this function should not execute if the version can't be determined or
-    # is not at least HDP 2.2.0.0
-    if not params.version or compare_versions(format_stack_version(params.version), '2.2.0.0') < 0:
+    # is not at least stack_version_ru_support
+    if not params.version or compare_versions(format_stack_version(params.version), params.stack_version_ru_support) < 0:
       return
 
     Logger.info("Executing Oozie Client Stack Upgrade pre-restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
index 030fb2d..5391a65 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
@@ -47,7 +47,8 @@ from check_oozie_server_status import check_oozie_server_status
 class OozieServer(Script):
 
   def get_stack_to_component(self):
-    return {"HDP": "oozie-server"}
+    import params
+    return {params.stack_name: "oozie-server"}
 
   def install(self, env):
     self.install_packages(env)
@@ -65,17 +66,17 @@ class OozieServer(Script):
 
     if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
       Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
-      if compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
-        # In order for the "/usr/hdp/current/oozie-<client/server>" point to the new version of
+      if compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
+        # In order for the "<stack_dir>/current/oozie-<client/server>" links to point to the new version of
         # oozie, we need to create the symlinks both for server and client.
         # This is required as both need to be pointing to new installed oozie version.
 
-        # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
+        # Sets the symlink : eg: <stack_dir>/current/oozie-client -> <stack_dir>/2.3.x.y-<version>/oozie
         stack_select.select("oozie-client", params.version)
-        # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
+        # Sets the symlink : eg: <stack_dir>/current/oozie-server -> <stack_dir>/2.3.x.y-<version>/oozie
         stack_select.select("oozie-server", params.version)
 
-      if compare_versions(format_stack_version(params.version), '2.3.0.0') >= 0:
+      if compare_versions(format_stack_version(params.version), params.stack_version_oozie_preconfiguration_support) >= 0:
         conf_select.select(params.stack_name, "oozie", params.version)
 
     env.set_params(params)
@@ -186,15 +187,15 @@ class OozieServerDefault(OozieServer):
     env.set_params(params)
 
     # this function should not execute if the version can't be determined or
-    # is not at least HDP 2.2.0.0
-    if not params.version or compare_versions(format_stack_version(params.version), '2.2.0.0') < 0:
+    # is not at least stack_version_ru_support
+    if not params.version or compare_versions(format_stack_version(params.version), params.stack_version_ru_support) < 0:
       return
 
     Logger.info("Executing Oozie Server Stack Upgrade pre-restart")
 
     OozieUpgrade.backup_configuration()
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "oozie", params.version)
       stack_select.select("oozie-server", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
index 27e2766..2b6a3e5 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
@@ -99,31 +99,31 @@ class OozieUpgrade(Script):
   def prepare_libext_directory():
     """
     Performs the following actions on libext:
-      - creates /usr/hdp/current/oozie/libext and recursively
+      - creates <stack_dir>/current/oozie/libext and recursively
       - set 777 permissions on it and its parents.
       - downloads JDBC driver JAR if needed
       - copies Falcon JAR for the Oozie WAR if needed
     """
     import params
 
-    # some versions of HDP don't need the lzo compression libraries
+    # some stack versions might not need the lzo compression libraries
     target_version_needs_compression_libraries = compare_versions(
-      format_stack_version(params.version), '2.2.1.0') >= 0
+      format_stack_version(params.version), params.stack_version_lzo_unsupport) >= 0
 
     # ensure the directory exists
     Directory(params.oozie_libext_dir, mode = 0777)
 
     # get all hadooplzo* JAR files
     # hdp-select set hadoop-client has not run yet, therefore we cannot use
-    # /usr/hdp/current/hadoop-client ; we must use params.version directly
-    # however, this only works when upgrading beyond 2.2.0.0; don't do this
-    # for downgrade to 2.2.0.0 since hadoop-lzo will not be present
+    # <stack_dir>/current/hadoop-client ; we must use params.version directly
+    # however, this only works when upgrading beyond stack_version_ru_support; don't do this
+    # for downgrade to stack_version_ru_support since hadoop-lzo will not be present
     # This can also be called during a Downgrade.
-    # When a version is Intalled, it is responsible for downloading the hadoop-lzo packages
+    # When a version is installed, it is responsible for downloading the hadoop-lzo packages
     # if lzo is enabled.
     if params.lzo_enabled and (params.upgrade_direction == Direction.UPGRADE or target_version_needs_compression_libraries):
       hadoop_lzo_pattern = 'hadoop-lzo*.jar'
-      hadoop_client_new_lib_dir = format("/usr/hdp/{version}/hadoop/lib")
+      hadoop_client_new_lib_dir = format("{stack_dir}/{version}/hadoop/lib")
 
       files = glob.iglob(os.path.join(hadoop_client_new_lib_dir, hadoop_lzo_pattern))
       if not files:
@@ -143,10 +143,10 @@ class OozieUpgrade(Script):
           hadoop_client_new_lib_dir, hadoop_lzo_pattern))
 
     # copy ext ZIP to libext dir
-    oozie_ext_zip_file = '/usr/share/HDP-oozie/ext-2.2.zip'
+    oozie_ext_zip_file = params.ext_js_path
 
-    # something like /usr/hdp/current/oozie-server/libext/ext-2.2.zip
-    oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir, "ext-2.2.zip")
+    # something like <stack_dir>/current/oozie-server/libext/ext-2.2.zip
+    oozie_ext_zip_target_path = os.path.join(params.oozie_libext_dir, params.ext_js_file)
 
     if not os.path.isfile(oozie_ext_zip_file):
       raise Fail("Unable to copy {0} because it does not exist".format(oozie_ext_zip_file))
@@ -172,7 +172,7 @@ class OozieUpgrade(Script):
     # copy the Falcon JAR if needed; falcon has not upgraded yet, so we must
     # use the versioned falcon directory
     if params.has_falcon_host:
-      versioned_falcon_jar_directory = "/usr/hdp/{0}/falcon/oozie/ext/falcon-oozie-el-extension-*.jar".format(stack_version)
+      versioned_falcon_jar_directory = "{0}/{1}/falcon/oozie/ext/falcon-oozie-el-extension-*.jar".format(params.stack_dir, stack_version)
       Logger.info("Copying {0} to {1}".format(versioned_falcon_jar_directory, params.oozie_libext_dir))
 
       Execute(format('{sudo} cp {versioned_falcon_jar_directory} {oozie_libext_dir}'))
@@ -238,10 +238,10 @@ class OozieUpgrade(Script):
     # the database upgrade requires the db driver JAR, but since we have
     # not yet run hdp-select to upgrade the current points, we have to use
     # the versioned libext directory as the location
-    versioned_libext_dir = "/usr/hdp/{0}/oozie/libext".format(stack_version)
+    versioned_libext_dir = "{0}/{1}/oozie/libext".format(params.stack_dir, stack_version)
     oozie.download_database_library_if_needed(target_directory=versioned_libext_dir)
 
-    database_upgrade_command = "/usr/hdp/{0}/oozie/bin/ooziedb.sh upgrade -run".format(stack_version)
+    database_upgrade_command = "{0}/{1}/oozie/bin/ooziedb.sh upgrade -run".format(params.stack_dir, stack_version)
     Execute(database_upgrade_command, user=params.oozie_user, logoutput=True)
 
     # install new sharelib to HDFS
@@ -285,8 +285,8 @@ class OozieUpgrade(Script):
     stack_version = upgrade_stack[1]
 
     # install new sharelib to HDFS
-    sharelib_command = "/usr/hdp/{0}/oozie/bin/oozie-setup.sh sharelib create -fs {1}".format(
-      stack_version, params.fs_root)
+    sharelib_command = "{0}/{1}/oozie/bin/oozie-setup.sh sharelib create -fs {2}".format(
+      params.stack_dir, stack_version, params.fs_root)
 
     Execute(sharelib_command, user=params.oozie_user, logoutput=True)
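
After this change every versioned Oozie path is assembled from
params.stack_dir plus the upgrade target version rather than a hard-coded
/usr/hdp prefix. A small sketch of the construction (values illustrative):

    # Versioned-path construction mirroring the diff above.
    stack_dir = "/usr/hdp"
    stack_version = "2.3.4.0-3485"
    fs_root = "hdfs://namenode:8020"
    sharelib_command = "{0}/{1}/oozie/bin/oozie-setup.sh sharelib create -fs {2}".format(
        stack_dir, stack_version, fs_root)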
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index 0decbc2..7b94f83 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -46,19 +46,25 @@ hostname = config["hostname"]
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
 stack_name = default("/hostLevelParams/stack_name", None)
+stack_dir = config['configurations']['cluster-env']['stack_dir']
+stack_shared_dir = config['configurations']['cluster-env']['stack_shared_dir']
 upgrade_direction = default("/commandParams/upgrade_direction", None)
 agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
 agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+stack_version_lzo_unsupport = config['configurations']['cluster-env']['stack_version_lzo_unsupport']
+stack_version_oozie_preconfiguration_support = config['configurations']['cluster-env']['stack_version_oozie_preconfiguration_support']
 
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hadoop_lib_home = stack_select.get_hadoop_dir("lib")
 
 #hadoop params
-if Script.is_stack_greater_or_equal("2.2"):
+
+if Script.is_stack_greater_or_equal(stack_version_ru_support):
   # something like 2.3.0.0-1234
   stack_version = None
   upgrade_stack = stack_select._get_upgrade_stack()
@@ -68,25 +74,25 @@ if Script.is_stack_greater_or_equal("2.2"):
   # oozie-server or oozie-client, depending on role
   oozie_root = status_params.component_directory
 
-  # using the correct oozie root dir, format the correct location
-  oozie_lib_dir = format("/usr/hdp/current/{oozie_root}")
-  oozie_setup_sh = format("/usr/hdp/current/{oozie_root}/bin/oozie-setup.sh")
-  oozie_webapps_dir = format("/usr/hdp/current/{oozie_root}/oozie-server/webapps")
-  oozie_webapps_conf_dir = format("/usr/hdp/current/{oozie_root}/oozie-server/conf")
-  oozie_libext_dir = format("/usr/hdp/current/{oozie_root}/libext")
-  oozie_server_dir = format("/usr/hdp/current/{oozie_root}/oozie-server")
-  oozie_shared_lib = format("/usr/hdp/current/{oozie_root}/share")
-  oozie_home = format("/usr/hdp/current/{oozie_root}")
-  oozie_bin_dir = format("/usr/hdp/current/{oozie_root}/bin")
-  oozie_examples_regex = format("/usr/hdp/current/{oozie_root}/doc")
+  # using the correct oozie stack root dir, format the correct location
+  oozie_lib_dir = format("{stack_dir}/current/{oozie_root}")
+  oozie_setup_sh = format("{stack_dir}/current/{oozie_root}/bin/oozie-setup.sh")
+  oozie_webapps_dir = format("{stack_dir}/current/{oozie_root}/oozie-server/webapps")
+  oozie_webapps_conf_dir = format("{stack_dir}/current/{oozie_root}/oozie-server/conf")
+  oozie_libext_dir = format("{stack_dir}/current/{oozie_root}/libext")
+  oozie_server_dir = format("{stack_dir}/current/{oozie_root}/oozie-server")
+  oozie_shared_lib = format("{stack_dir}/current/{oozie_root}/share")
+  oozie_home = format("{stack_dir}/current/{oozie_root}")
+  oozie_bin_dir = format("{stack_dir}/current/{oozie_root}/bin")
+  oozie_examples_regex = format("{stack_dir}/current/{oozie_root}/doc")
 
   # set the falcon home for copying JARs; if in an upgrade, then use the version of falcon that
   # matches the version of oozie
-  falcon_home = '/usr/hdp/current/falcon-client'
+  falcon_home = format("{stack_dir}/current/falcon-client")
   if stack_version is not None:
-    falcon_home = '/usr/hdp/{0}/falcon'.format(stack_version)
+    falcon_home = '{0}/{1}/falcon'.format(stack_dir, stack_version)
 
-  conf_dir = format("/usr/hdp/current/{oozie_root}/conf")
+  conf_dir = format("{stack_dir}/current/{oozie_root}/conf")
   hive_conf_dir = format("{conf_dir}/action-conf/hive")
 
 else:
@@ -123,9 +129,8 @@ oozie_pid_dir = status_params.oozie_pid_dir
 pid_file = status_params.pid_file
 hadoop_jar_location = "/usr/lib/hadoop/"
 java_share_dir = "/usr/share/java"
-# for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
 ext_js_file = "ext-2.2.zip"
-ext_js_path = format("/usr/share/HDP-oozie/{ext_js_file}")
+ext_js_path = format("{stack_shared_dir}-oozie/{ext_js_file}")
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 oozie_heapsize = config['configurations']['oozie-env']['oozie_heapsize']
 oozie_permsize = config['configurations']['oozie-env']['oozie_permsize']
@@ -143,7 +148,7 @@ oozie_site = config['configurations']['oozie-site']
 # Need this for yarn.nodemanager.recovery.dir in yarn-site
 yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
 
-if security_enabled and Script.is_stack_less_than("2.2"):
+if security_enabled and Script.is_stack_less_than(stack_version_ru_support):
   #older versions of oozie have problems when using _HOST in principal
   oozie_site = dict(config['configurations']['oozie-site'])
   oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = \
@@ -194,7 +199,7 @@ if https_port is not None:
 hdfs_site = config['configurations']['hdfs-site']
 fs_root = config['configurations']['core-site']['fs.defaultFS']
 
-if Script.is_stack_less_than("2.2"):
+if Script.is_stack_less_than(stack_version_ru_support):
   put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
 # for newer
 else:

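With the hardcoded "2.2" gone, every guard compares the running stack against the stack_version_ru_support property. A rough sketch of the comparison that Script.is_stack_greater_or_equal performs, assuming dotted version strings with an optional build suffix:

    # Sketch: dotted-version comparison behind the guards above.
    def version_tuple(v):
        # "2.3.0.0-1234" -> (2, 3, 0, 0); the build suffix is ignored
        return tuple(int(p) for p in v.split('-')[0].split('.'))

    def is_stack_greater_or_equal(current, threshold):
        return version_tuple(current) >= version_tuple(threshold)

    stack_version_ru_support = "2.2.0.0"  # read from cluster-env in the real scripts
    print(is_stack_greater_or_equal("2.3.0.0-1234", stack_version_ru_support))  # True
    print(is_stack_greater_or_equal("2.1.0.0", stack_version_ru_support))       # False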
http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
index 8d14836..11f924f 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
@@ -43,7 +43,6 @@ class OozieServiceCheckDefault(OozieServiceCheck):
     import params
     env.set_params(params)
 
-    # on HDP1 this file is different
     prepare_hdfs_file_name = 'prepareOozieHdfsDirectories.sh'
     smoke_test_file_name = 'oozieSmoke2.sh'
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
index 954bb80..98b6dbd 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
@@ -25,7 +25,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 # a map of the Ambari role to the component name
-# for use with /usr/hdp/current/<component>
+# for use with <stack_dir>/current/<component>
 SERVER_ROLE_DIRECTORY_MAP = {
   'OOZIE_SERVER' : 'oozie-server',
   'OOZIE_CLIENT' : 'oozie-client',
@@ -44,12 +44,15 @@ else:
   oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
   pid_file = format("{oozie_pid_dir}/oozie.pid")
 
+  stack_dir = config['configurations']['cluster-env']['stack_dir']
+  stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+
   security_enabled = config['configurations']['cluster-env']['security_enabled']
   kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 
   conf_dir = "/etc/oozie/conf"
-  if Script.is_stack_greater_or_equal("2.2"):
-    conf_dir = format("/usr/hdp/current/{component_directory}/conf")
+  if Script.is_stack_greater_or_equal(stack_version_ru_support):
+    conf_dir = format("{stack_dir}/current/{component_directory}/conf")
 
   tmp_dir = Script.get_tmp_dir()
   oozie_user = config['configurations']['oozie-env']['oozie_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/6900539d/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 70a5fbb..3bef518 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -27,6 +27,11 @@
         <description>Directory prefix for stacks installation</description>
     </property>
     <property>
+        <name>stack_shared_dir</name>
+        <value>/usr/share/HDP</value>
+        <description>Prefix for shared stack directory</description>
+    </property>
+    <property>
         <name>stack_version_ru_support</name>
         <value>2.2.0.0</value>
         <description>Stack version from which rolling upgrade is supported and installation layout changed</description>
@@ -42,11 +47,21 @@
         <description>Stack version from which snappy is not supported</description>
     </property>
     <property>
+        <name>stack_version_lzo_unsupport</name>
+        <value>2.2.1.0</value>
+        <description>Stack version from which lzo compression libraries are not needed</description>
+    </property>
+    <property>
         <name>stack_version_nfs_support</name>
         <value>2.3.0.0</value>
         <description>Stack version from which hadoop-hdfs-nfs3 is supported</description>
     </property>
     <property>
+        <name>stack_version_oozie_preconfiguration_support</name>
+        <value>2.3.0.0</value>
+        <description>Stack version from which the oozie-server special case in the PRE-UPGRADE phase is supported</description>
+    </property>
+    <property>
         <name>security_enabled</name>
         <value>false</value>
         <description>Hadoop Security</description>

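Scripts see the properties declared above through the command's configuration dictionary, exactly like the config['configurations']['cluster-env'][...] reads earlier in this commit. A toy illustration using the defaults from this file:

    # Sketch: reading the new cluster-env properties in a params script.
    config = {
        'configurations': {
            'cluster-env': {
                'stack_shared_dir': '/usr/share/HDP',
                'stack_version_lzo_unsupport': '2.2.1.0',
                'stack_version_oozie_preconfiguration_support': '2.3.0.0',
            }
        }
    }

    stack_shared_dir = config['configurations']['cluster-env']['stack_shared_dir']
    ext_js_file = "ext-2.2.zip"
    ext_js_path = "{0}-oozie/{1}".format(stack_shared_dir, ext_js_file)
    print(ext_js_path)  # /usr/share/HDP-oozie/ext-2.2.zip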

[49/51] [abbrv] ambari git commit: AMBARI-15114: Parameterize distro-specific stack information for HBASE (Juanjo Marron via dili)

Posted by jl...@apache.org.
AMBARI-15114: Parameterize distro-specific stack information for HBASE (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/fa8b4c4e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/fa8b4c4e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/fa8b4c4e

Branch: refs/heads/AMBARI-13364
Commit: fa8b4c4e3981c82bf9930526f3bee8581da0524b
Parents: 6900539
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:32:35 2016 -0500
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 15:36:28 2016 -0800

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/hbase_client.py  |  5 +--
 .../0.96.0.2.0/package/scripts/hbase_master.py  |  3 +-
 .../package/scripts/hbase_regionserver.py       |  3 +-
 .../0.96.0.2.0/package/scripts/params_linux.py  | 33 +++++++++++---------
 .../package/scripts/phoenix_queryserver.py      |  7 +++--
 .../package/scripts/phoenix_service.py          |  2 +-
 .../0.96.0.2.0/package/scripts/status_params.py | 10 ++++--
 .../HBASE/0.96.0.2.0/package/scripts/upgrade.py |  2 +-
 .../HDP/2.0.6/configuration/cluster-env.xml     | 10 ++++++
 9 files changed, 48 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
index c31bbf6..2f59e4b 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
@@ -51,13 +51,14 @@ class HbaseClientWindows(HbaseClient):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class HbaseClientDefault(HbaseClient):
   def get_stack_to_component(self):
-    return {"HDP": "hbase-client"}
+    import params
+    return {params.stack_name: "hbase-client"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hbase", params.version)
       stack_select.select("hbase-client", params.version)
 

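Keying get_stack_to_component on params.stack_name instead of the literal "HDP" lets the same script serve any stack. A small sketch of the effect (the component_current_dir consumer is hypothetical; Ambari's real lookup lives in the Script base class):

    # Sketch: stack-name-keyed role-to-component mapping.
    def get_stack_to_component(stack_name):
        return {stack_name: "hbase-client"}

    def component_current_dir(stack_name, stack_dir):
        # hypothetical consumer resolving <stack_dir>/current/<component>
        component = get_stack_to_component(stack_name)[stack_name]
        return "{0}/current/{1}".format(stack_dir, component)

    print(component_current_dir("HDP", "/usr/hdp"))  # /usr/hdp/current/hbase-client
    print(component_current_dir("ODP", "/opt/odp"))  # /opt/odp/current/hbase-client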
http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
index 9515f61..5833773 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
@@ -71,7 +71,8 @@ class HbaseMasterWindows(HbaseMaster):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class HbaseMasterDefault(HbaseMaster):
   def get_stack_to_component(self):
-    return {"HDP": "hbase-master"}
+    import params
+    return {params.stack_name: "hbase-master"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
index c17d219..37dd0a1 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_regionserver.py
@@ -69,7 +69,8 @@ class HbaseRegionServerWindows(HbaseRegionServer):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class HbaseRegionServerDefault(HbaseRegionServer):
   def get_stack_to_component(self):
-    return {"HDP": "hbase-regionserver"}
+    import params
+    return {params.stack_name: "hbase-regionserver"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 63e548a..6a2abab 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -43,15 +43,18 @@ exec_tmp_dir = Script.get_tmp_dir()
 sudo = AMBARI_SUDO_BINARY
 
 stack_name = default("/hostLevelParams/stack_name", None)
+stack_dir = config['configurations']['cluster-env']['stack_dir']
 agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
 agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
-
 version = default("/commandParams/version", None)
 component_directory = status_params.component_directory
 etc_prefix_dir = "/etc/hbase"
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+stack_version_ranger_support = config['configurations']['cluster-env']['stack_version_ranger_support']
+stack_version_phoenix_support = config['configurations']['cluster-env']['stack_version_phoenix_support']
 
 # hadoop default parameters
 hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
@@ -62,19 +65,19 @@ region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
 hbase_cmd = "/usr/lib/hbase/bin/hbase"
 hbase_max_direct_memory_size = None
 
-# hadoop parameters for 2.2+
-if Script.is_stack_greater_or_equal("2.2"):
-  daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
-  region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
-  region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')
-  hbase_cmd = format('/usr/hdp/current/hbase-client/bin/hbase')
+# hadoop parameters for stack_version_ru_support+
+if Script.is_stack_greater_or_equal(stack_version_ru_support):
+  daemon_script = format('{stack_dir}/current/hbase-client/bin/hbase-daemon.sh')
+  region_mover = format('{stack_dir}/current/hbase-client/bin/region_mover.rb')
+  region_drainer = format('{stack_dir}/current/hbase-client/bin/draining_servers.rb')
+  hbase_cmd = format('{stack_dir}/current/hbase-client/bin/hbase')
 
   hbase_max_direct_memory_size  = default('configurations/hbase-env/hbase_max_direct_memory_size', None)
 
-  daemon_script=format("/usr/hdp/current/{component_directory}/bin/hbase-daemon.sh")
-  region_mover = format("/usr/hdp/current/{component_directory}/bin/region_mover.rb")
-  region_drainer = format("/usr/hdp/current/{component_directory}/bin/draining_servers.rb")
-  hbase_cmd = format("/usr/hdp/current/{component_directory}/bin/hbase")
+  daemon_script=format("{stack_dir}/current/{component_directory}/bin/hbase-daemon.sh")
+  region_mover = format("{stack_dir}/current/{component_directory}/bin/region_mover.rb")
+  region_drainer = format("{stack_dir}/current/{component_directory}/bin/draining_servers.rb")
+  hbase_cmd = format("{stack_dir}/current/{component_directory}/bin/hbase")
 
 
 hbase_conf_dir = status_params.hbase_conf_dir
@@ -84,7 +87,7 @@ hbase_user_nofile_limit = default("/configurations/hbase-env/hbase_user_nofile_l
 hbase_user_nproc_limit = default("/configurations/hbase-env/hbase_user_nproc_limit", "16000")
 
 # no symlink for phoenix-server at this point
-phx_daemon_script = '/usr/hdp/current/phoenix-server/bin/queryserver.py'
+phx_daemon_script = format('{stack_dir}/current/phoenix-server/bin/queryserver.py')
 
 hbase_excluded_hosts = config['commandParams']['excluded_hosts']
 hbase_drain_only = default("/commandParams/mark_draining_only",False)
@@ -311,7 +314,7 @@ if has_ranger_admin:
 
   downloaded_custom_connector = format("{exec_tmp_dir}/{jdbc_jar_name}")
   driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
-  driver_curl_target = format("/usr/hdp/current/{component_directory}/lib/{jdbc_jar_name}")
+  driver_curl_target = format("{stack_dir}/current/{component_directory}/lib/{jdbc_jar_name}")
 
   hbase_ranger_plugin_config = {
     'username': repo_config_username,
@@ -351,11 +354,11 @@ if security_enabled:
     hbase_coprocessor_master_classes = "org.apache.hadoop.hbase.security.access.AccessController"
     hbase_coprocessor_regionserver_classes = "org.apache.hadoop.hbase.security.access.AccessController"
     hbase_coprocessor_region_classes = "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.hadoop.hbase.security.access.AccessController"
-  elif xml_configurations_supported: # HDP stack 2.3+ ranger plugin enabled
+  elif xml_configurations_supported: # stack_version_ranger_support+
     hbase_coprocessor_master_classes = "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor "
     hbase_coprocessor_regionserver_classes = "org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"
     hbase_coprocessor_region_classes = "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,org.apache.ranger.authorization.hbase.RangerAuthorizationCoprocessor"
-  else: # HDP Stack 2.2 and less / ranger plugin enabled
+  else: # stack_version_ru_support and below / ranger plugin enabled
     hbase_coprocessor_master_classes = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
     hbase_coprocessor_regionserver_classes = "com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"
     hbase_coprocessor_region_classes = "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint,com.xasecure.authorization.hbase.XaSecureAuthorizationCoprocessor"

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
index 87e4899..c28fa7d 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
@@ -23,7 +23,7 @@ from resource_management.libraries.script import Script
 from phoenix_service import phoenix_service
 from hbase import hbase
 
-# Note: Phoenix Query Server is only applicable to HDP-2.3 and above.
+# Note: Phoenix Query Server is only applicable to stack_version_phoenix_support and above.
 class PhoenixQueryServer(Script):
 
   def install(self, env):
@@ -33,7 +33,8 @@ class PhoenixQueryServer(Script):
 
 
   def get_stack_to_component(self):
-    return {"HDP": "phoenix-server"}
+    import params
+    return {params.stack_name: "phoenix-server"}
 
 
   def configure(self, env):
@@ -59,7 +60,7 @@ class PhoenixQueryServer(Script):
     import params
     env.set_params(params)
 
-    if Script.is_stack_greater_or_equal("2.3"):
+    if Script.is_stack_greater_or_equal(params.stack_version_phoenix_support):
       # phoenix uses hbase configs
       conf_select.select(params.stack_name, "hbase", params.version)
       stack_select.select("phoenix-server", params.version)

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_service.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_service.py
index e155cec..33c880c 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_service.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_service.py
@@ -22,7 +22,7 @@ from resource_management.core.resources.system import Execute
 from resource_management.core.resources.system import File
 from resource_management.libraries.functions import check_process_status, format
 
-# Note: Phoenix Query Server is only applicable to HDP-2.3 and above.
+# Note: Phoenix Query Server is only applicable to stack_version_phoenix_support and above.
 def phoenix_service(action = 'start'): # 'start', 'stop', 'status'
     # Note: params/status_params should already be imported before calling phoenix_service()
     pid_file = format("{pid_dir}/phoenix-{hbase_user}-server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
index 535c821..887f50f 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
@@ -25,7 +25,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 # a map of the Ambari role to the component name
-# for use with /usr/hdp/current/<component>
+# for use with <stack_dir>/current/<component>
 SERVER_ROLE_DIRECTORY_MAP = {
   'HBASE_MASTER' : 'hbase-master',
   'HBASE_REGIONSERVER' : 'hbase-regionserver',
@@ -49,7 +49,11 @@ else:
   kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
   tmp_dir = Script.get_tmp_dir()
 
+  # Stack related params
+  stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+  stack_dir = config['configurations']['cluster-env']['stack_dir']
+
   hbase_conf_dir = "/etc/hbase/conf"
   limits_conf_dir = "/etc/security/limits.d"
-  if Script.is_stack_greater_or_equal("2.2"):
-    hbase_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
\ No newline at end of file
+  if Script.is_stack_greater_or_equal(stack_version_ru_support):
+    hbase_conf_dir = format("{stack_dir}/current/{component_directory}/conf")

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
index 92c0f70..467139f 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
@@ -29,7 +29,7 @@ from resource_management.libraries.functions.decorator import retry
 def prestart(env, stack_component):
   import params
 
-  if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+  if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
     conf_select.select(params.stack_name, "hbase", params.version)
     stack_select.select(stack_component, params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/fa8b4c4e/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 3bef518..75b499f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -57,6 +57,16 @@
         <description>Stack version from which hadoop-hdfs-nfs3 is supported</description>
     </property>
     <property>
+        <name>stack_version_ranger_support</name>
+        <value>2.3.0.0</value>
+        <description>Stack version from which ranger is supported</description>
+    </property>
+    <property>
+        <name>stack_version_phoenix_support</name>
+        <value>2.3.0.0</value>
+        <description>Stack version from which Phoenix Query Server is supported</description>
+    </property>
+    <property>
         <name>stack_version_oozie_preconfiguration_support</name>
         <value>2.3.0.0</value>
         <description>Stack version from which the oozie-server special case in the PRE-UPGRADE phase is supported</description>


[41/51] [abbrv] ambari git commit: AMBARI-15293. Capture HDFS metrics per RPC port number in AMS and Grafana. Fix for FLUME metrics. (swagle)

Posted by jl...@apache.org.
AMBARI-15293. Capture HDFS metrics per RPC port number in AMS and Grafana. Fix for FLUME metrics. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/cbc3f1a9
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/cbc3f1a9
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/cbc3f1a9

Branch: refs/heads/AMBARI-13364
Commit: cbc3f1a900367d27ed69748cbec31e1b73bb28be
Parents: c0d0741
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Wed Mar 9 13:10:43 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Wed Mar 9 13:10:43 2016 -0800

----------------------------------------------------------------------
 .../server/api/services/AmbariMetaInfo.java     | 24 +++++++++++---------
 .../2.0.6/hooks/before-START/scripts/params.py  |  4 ++--
 2 files changed, 15 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/cbc3f1a9/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
index 2b863d5..4be4049 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
@@ -951,17 +951,19 @@ public class AmbariMetaInfo {
 
   private Map<String, Metric> getAggregateFunctionMetrics(String metricName, Metric currentMetric) {
     Map<String, Metric> newMetrics = new HashMap<String, Metric>();
-    // For every function id
-    for (String identifierToAdd : AGGREGATE_FUNCTION_IDENTIFIERS) {
-      String newMetricKey = metricName + identifierToAdd;
-      Metric newMetric = new Metric(
-        currentMetric.getName() + identifierToAdd,
-        currentMetric.isPointInTime(),
-        currentMetric.isTemporal(),
-        currentMetric.isAmsHostMetric(),
-        currentMetric.getUnit()
-      );
-      newMetrics.put(newMetricKey, newMetric);
+    if (!PropertyHelper.hasAggregateFunctionSuffix(currentMetric.getName())) {
+      // For every function id
+      for (String identifierToAdd : AGGREGATE_FUNCTION_IDENTIFIERS) {
+        String newMetricKey = metricName + identifierToAdd;
+        Metric newMetric = new Metric(
+          currentMetric.getName() + identifierToAdd,
+          currentMetric.isPointInTime(),
+          currentMetric.isTemporal(),
+          currentMetric.isAmsHostMetric(),
+          currentMetric.getUnit()
+        );
+        newMetrics.put(newMetricKey, newMetric);
+      }
     }
 
     return newMetrics;

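The new hasAggregateFunctionSuffix guard stops metrics that already end in an aggregate-function identifier from being expanded a second time into names like rpc.queue_time._avg._max. A Python rendition of the guarded expansion (the identifier list is an assumption; the Java above keeps it in AGGREGATE_FUNCTION_IDENTIFIERS):

    # Sketch of the guarded aggregate expansion in AmbariMetaInfo above.
    AGGREGATE_FUNCTION_IDENTIFIERS = ["._sum", "._max", "._min", "._avg"]  # assumed contents

    def has_aggregate_function_suffix(name):
        return any(name.endswith(s) for s in AGGREGATE_FUNCTION_IDENTIFIERS)

    def get_aggregate_function_metrics(metric_name):
        new_metrics = {}
        if not has_aggregate_function_suffix(metric_name):
            for suffix in AGGREGATE_FUNCTION_IDENTIFIERS:
                new_metrics[metric_name + suffix] = metric_name + suffix
        return new_metrics

    print(sorted(get_aggregate_function_metrics("rpc.queue_time")))
    # ['rpc.queue_time._avg', 'rpc.queue_time._max', 'rpc.queue_time._min', 'rpc.queue_time._sum']
    print(get_aggregate_function_metrics("rpc.queue_time._avg"))  # {} -- already aggregated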
http://git-wip-us.apache.org/repos/asf/ambari/blob/cbc3f1a9/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index f6f56df..a19d969 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -275,8 +275,8 @@ if namenode_rpc:
  nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
 
 if dfs_ha_enabled:
- dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{namenode_id}'), None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{namenode_id}'), None)
+ dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
+ dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
 else:
  dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
  dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)

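The fix matters because, under NameNode HA, hdfs-site keys the per-NameNode RPC addresses by nameservice and NameNode id together, so the old lookup missing the nameservice segment always came back empty. A small sketch with illustrative values:

    # Sketch: HA-aware hdfs-site key construction, as fixed above.
    hdfs_site = {  # sample HA configuration; values are illustrative
        'dfs.namenode.servicerpc-address.mycluster.nn1': 'c6401.ambari.apache.org:8021',
        'dfs.namenode.servicerpc-address.mycluster.nn2': 'c6402.ambari.apache.org:8021',
    }
    dfs_ha_nameservices = 'mycluster'
    namenode_id = 'nn1'

    old_key = 'dfs.namenode.servicerpc-address.{0}'.format(namenode_id)  # pre-fix, incomplete
    new_key = 'dfs.namenode.servicerpc-address.{0}.{1}'.format(dfs_ha_nameservices, namenode_id)

    print(hdfs_site.get(old_key))  # None
    print(hdfs_site.get(new_key))  # c6401.ambari.apache.org:8021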

[15/51] [abbrv] ambari git commit: AMBARI-15324 - Kerberos Tickets Expire Too Frequently For Alerts (jonathanhurley)

Posted by jl...@apache.org.
AMBARI-15324 - Kerberos Tickets Expire Too Frequently For Alerts (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2efe8945
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2efe8945
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2efe8945

Branch: refs/heads/AMBARI-13364
Commit: 2efe8945c152fa5d4bdc51bf1828ca0ade4a004e
Parents: 8615cac
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Mon Mar 7 17:36:13 2016 -0500
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Tue Mar 8 15:53:52 2016 -0500

----------------------------------------------------------------------
 ambari-agent/conf/unix/ambari-agent.ini         |  1 +
 ambari-agent/conf/windows/ambari-agent.ini      |  1 +
 .../ambari_agent/AlertSchedulerHandler.py       | 13 +--
 .../src/main/python/ambari_agent/Controller.py  |  7 +-
 .../python/ambari_agent/alerts/base_alert.py    |  7 +-
 .../python/ambari_agent/alerts/metric_alert.py  | 11 ++-
 .../python/ambari_agent/alerts/port_alert.py    |  4 +-
 .../ambari_agent/alerts/recovery_alert.py       |  4 +-
 .../python/ambari_agent/alerts/script_alert.py  | 13 ++-
 .../python/ambari_agent/alerts/web_alert.py     | 11 +--
 .../ambari_agent/TestAlertSchedulerHandler.py   | 28 ++++---
 .../src/test/python/ambari_agent/TestAlerts.py  | 83 ++++++++++---------
 .../test/python/ambari_agent/TestBaseAlert.py   | 20 +++--
 .../test/python/ambari_agent/TestMetricAlert.py | 11 ++-
 .../test/python/ambari_agent/TestPortAlert.py   | 20 +++--
 .../test/python/ambari_agent/TestScriptAlert.py |  7 +-
 .../libraries/functions/curl_krb_request.py     | 86 +++++++++++++++++---
 .../package/alerts/alert_checkpoint_time.py     | 10 ++-
 .../package/alerts/alert_ha_namenode_health.py  |  7 +-
 .../package/alerts/alert_metrics_deviation.py   | 16 +++-
 .../package/alerts/alert_upgrade_finalized.py   |  9 +-
 .../package/alerts/alert_webhcat_server.py      | 18 ++--
 .../package/alerts/alert_nodemanager_health.py  |  6 +-
 .../alerts/alert_nodemanagers_summary.py        | 10 ++-
 24 files changed, 270 insertions(+), 133 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/conf/unix/ambari-agent.ini
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/unix/ambari-agent.ini b/ambari-agent/conf/unix/ambari-agent.ini
index 05e898a..4ec16d6 100644
--- a/ambari-agent/conf/unix/ambari-agent.ini
+++ b/ambari-agent/conf/unix/ambari-agent.ini
@@ -32,6 +32,7 @@ tolerate_download_failures=true
 run_as_user=root
 parallel_execution=0
 alert_grace_period=5
+alert_kinit_timeout=14400000
 system_resource_overrides=/etc/resource_overrides
 
 [security]

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/conf/windows/ambari-agent.ini
----------------------------------------------------------------------
diff --git a/ambari-agent/conf/windows/ambari-agent.ini b/ambari-agent/conf/windows/ambari-agent.ini
index e490f7c..df88be6 100644
--- a/ambari-agent/conf/windows/ambari-agent.ini
+++ b/ambari-agent/conf/windows/ambari-agent.ini
@@ -30,6 +30,7 @@ cache_dir=cache
 tolerate_download_failures=true
 parallel_execution=0
 alert_grace_period=5
+alert_kinit_timeout=14400000
 system_resource_overrides=\\etc\\resource_overrides
 
 [security]

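alert_kinit_timeout is in milliseconds; 14400000 is four hours, matching BaseAlert._DEFAULT_KINIT_TIMEOUT later in this commit. It bounds how long a cached Kerberos ticket is trusted before an alert forces a fresh kinit. A rough sketch of the timer gate that kinit_timer_ms feeds, under the assumption that curl_krb_request tracks the last kinit time per principal (its internals are not shown in this diff):

    # Sketch: force a kinit only when the last one is older than kinit_timer_ms.
    import time

    _last_kinit_ms = {}  # principal -> epoch millis of last forced kinit (assumed cache)

    def should_kinit(principal, kinit_timer_ms):
        now_ms = int(time.time() * 1000)
        if now_ms - _last_kinit_ms.get(principal, 0) >= kinit_timer_ms:
            _last_kinit_ms[principal] = now_ms
            return True   # ticket is stale by policy; re-kinit
        return False      # trust the cached ticket

    DEFAULT_KINIT_TIMEOUT = 14400000  # 4 hours, matching the ini default above
    print(should_kinit("ambari-qa@EXAMPLE.COM", DEFAULT_KINIT_TIMEOUT))  # True (first call)
    print(should_kinit("ambari-qa@EXAMPLE.COM", DEFAULT_KINIT_TIMEOUT))  # False (just refreshed)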
http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py b/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
index eb9945b..b84832d 100644
--- a/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
+++ b/ambari-agent/src/main/python/ambari_agent/AlertSchedulerHandler.py
@@ -35,6 +35,7 @@ from alerts.script_alert import ScriptAlert
 from alerts.web_alert import WebAlert
 from alerts.recovery_alert import RecoveryAlert
 from ambari_agent.ExitHelper import ExitHelper
+
 logger = logging.getLogger(__name__)
 
 class AlertSchedulerHandler():
@@ -46,8 +47,7 @@ class AlertSchedulerHandler():
   TYPE_RECOVERY = 'RECOVERY'
 
   def __init__(self, cachedir, stacks_dir, common_services_dir, host_scripts_dir,
-      alert_grace_period, cluster_configuration, config, recovery_manager,
-      in_minutes=True):
+      cluster_configuration, config, recovery_manager, in_minutes=True):
 
     self.cachedir = cachedir
     self.stacks_dir = stacks_dir
@@ -58,7 +58,10 @@ class AlertSchedulerHandler():
 
     # a mapping between a cluster name and a unique hash for all definitions
     self._cluster_hashes = {}
-    
+
+    # the amount of time, in seconds, that an alert can run after its scheduled time
+    alert_grace_period = int(config.get('agent', 'alert_grace_period', 5))
+
     if not os.path.exists(cachedir):
       try:
         os.makedirs(cachedir)
@@ -297,7 +300,7 @@ class AlertSchedulerHandler():
       if source_type == AlertSchedulerHandler.TYPE_METRIC:
         alert = MetricAlert(json_definition, source, self.config)
       elif source_type == AlertSchedulerHandler.TYPE_PORT:
-        alert = PortAlert(json_definition, source)
+        alert = PortAlert(json_definition, source, self.config)
       elif source_type == AlertSchedulerHandler.TYPE_SCRIPT:
         source['stacks_directory'] = self.stacks_dir
         source['common_services_directory'] = self.common_services_dir
@@ -306,7 +309,7 @@ class AlertSchedulerHandler():
       elif source_type == AlertSchedulerHandler.TYPE_WEB:
         alert = WebAlert(json_definition, source, self.config)
       elif source_type == AlertSchedulerHandler.TYPE_RECOVERY:
-        alert = RecoveryAlert(json_definition, source, self.recovery_manger)
+        alert = RecoveryAlert(json_definition, source, self.config, self.recovery_manger)
 
       if alert is not None:
         alert.set_cluster(clusterName, hostName)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/main/python/ambari_agent/Controller.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/Controller.py b/ambari-agent/src/main/python/ambari_agent/Controller.py
index eb2c363..c1c16ac 100644
--- a/ambari-agent/src/main/python/ambari_agent/Controller.py
+++ b/ambari-agent/src/main/python/ambari_agent/Controller.py
@@ -103,12 +103,9 @@ class Controller(threading.Thread):
 
     self.move_data_dir_mount_file()
 
-    self.alert_grace_period = int(config.get('agent', 'alert_grace_period', 5))
-
-    self.alert_scheduler_handler = AlertSchedulerHandler(alerts_cache_dir, 
+    self.alert_scheduler_handler = AlertSchedulerHandler(alerts_cache_dir,
       stacks_cache_dir, common_services_cache_dir, host_scripts_cache_dir,
-      self.alert_grace_period, self.cluster_configuration, config,
-      self.recovery_manager)
+      self.cluster_configuration, config, self.recovery_manager)
 
     self.alert_scheduler_handler.start()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
index fd6b03c..92db07c 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/base_alert.py
@@ -29,6 +29,9 @@ logger = logging.getLogger()
 AlertUri = namedtuple('AlertUri', 'uri is_ssl_enabled')
 
 class BaseAlert(object):
+  # will force a kinit even if klist says there are valid tickets (4 hour default)
+  _DEFAULT_KINIT_TIMEOUT = 14400000
+
   RESULT_OK = "OK"
   RESULT_WARNING = "WARNING"
   RESULT_CRITICAL = "CRITICAL"
@@ -38,12 +41,12 @@ class BaseAlert(object):
   HA_NAMESERVICE_PARAM = "{{ha-nameservice}}"
   HA_ALIAS_PARAM = "{{alias}}"
 
-  def __init__(self, alert_meta, alert_source_meta):
+  def __init__(self, alert_meta, alert_source_meta, config):
     self.alert_meta = alert_meta
     self.alert_source_meta = alert_source_meta
     self.cluster_name = ''
     self.host_name = ''
-    
+    self.config = config
     
   def interval(self):
     """ gets the defined interval this check should run """

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py
index b2f4e33..d177bd4 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/metric_alert.py
@@ -42,7 +42,7 @@ DEFAULT_CONNECTION_TIMEOUT = 5.0
 class MetricAlert(BaseAlert):
   
   def __init__(self, alert_meta, alert_source_meta, config):
-    super(MetricAlert, self).__init__(alert_meta, alert_source_meta)
+    super(MetricAlert, self).__init__(alert_meta, alert_source_meta, config)
 
     connection_timeout = DEFAULT_CONNECTION_TIMEOUT
 
@@ -63,7 +63,9 @@ class MetricAlert(BaseAlert):
     self.connection_timeout = float(connection_timeout)
     self.curl_connection_timeout = int(connection_timeout)
 
-    self.config = config
+    # will force a kinit even if klist says there are valid tickets (4 hour default)
+    self.kinit_timeout = long(config.get('agent', 'alert_kinit_timeout', BaseAlert._DEFAULT_KINIT_TIMEOUT))
+
 
   def _collect(self):
     if self.metric_info is None:
@@ -209,7 +211,7 @@ class MetricAlert(BaseAlert):
 
           response, error_msg, time_millis = curl_krb_request(tmp_dir, kerberos_keytab, kerberos_principal, url,
             "metric_alert", kerberos_executable_search_paths, False, self.get_name(), smokeuser,
-            connection_timeout=self.curl_connection_timeout)
+            connection_timeout=self.curl_connection_timeout, kinit_timer_ms = self.kinit_timeout)
 
           content = response
         else:
@@ -254,7 +256,8 @@ class MetricAlert(BaseAlert):
       if not json_is_valid and security_enabled and kerberos_principal is not None and kerberos_keytab is not None:
         http_response_code, error_msg, time_millis = curl_krb_request(tmp_dir, kerberos_keytab,
           kerberos_principal, url, "metric_alert", kerberos_executable_search_paths, True,
-          self.get_name(), smokeuser, connection_timeout=self.curl_connection_timeout)
+          self.get_name(), smokeuser, connection_timeout=self.curl_connection_timeout,
+          kinit_timer_ms = self.kinit_timeout)
 
     return (value_list, http_response_code)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
index 92d28ad..1918327 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/port_alert.py
@@ -33,8 +33,8 @@ DEFAULT_CRITICAL_TIMEOUT = 5.0
 
 class PortAlert(BaseAlert):
 
-  def __init__(self, alert_meta, alert_source_meta):
-    super(PortAlert, self).__init__(alert_meta, alert_source_meta)
+  def __init__(self, alert_meta, alert_source_meta, config):
+    super(PortAlert, self).__init__(alert_meta, alert_source_meta, config)
 
     self.uri = None
     self.default_port = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/main/python/ambari_agent/alerts/recovery_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/recovery_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/recovery_alert.py
index 760a737..3092a39 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/recovery_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/recovery_alert.py
@@ -30,8 +30,8 @@ DEFAULT_CRITICAL_RECOVERIES_COUNT = 4
 UNKNOWN_COMPONENT = 'UNKNOWN_COMPONENT'
 class RecoveryAlert(BaseAlert):
 
-  def __init__(self, alert_meta, alert_source_meta, recovery_manager):
-    super(RecoveryAlert, self).__init__(alert_meta, alert_source_meta)
+  def __init__(self, alert_meta, alert_source_meta, config, recovery_manager):
+    super(RecoveryAlert, self).__init__(alert_meta, alert_source_meta, config)
 
     self.recovery_manager = recovery_manager
     self.warning_count = DEFAULT_WARNING_RECOVERIES_COUNT

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/main/python/ambari_agent/alerts/script_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/script_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/script_alert.py
index e8d0125..8dfa73e 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/script_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/script_alert.py
@@ -24,13 +24,15 @@ import os
 import re
 from alerts.base_alert import BaseAlert
 from resource_management.core.environment import Environment
-from resource_management.core.logger import Logger
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
 from ambari_agent import Constants
 
 logger = logging.getLogger("ambari_alerts")
 
 class ScriptAlert(BaseAlert):
+
   def __init__(self, alert_meta, alert_source_meta, config):
+
     """ ScriptAlert reporting structure is output from the script itself """
 
     alert_source_meta['reporting'] = {
@@ -40,9 +42,8 @@ class ScriptAlert(BaseAlert):
       'unknown': { 'text': '{0}' }
     }
 
-    super(ScriptAlert, self).__init__(alert_meta, alert_source_meta)
+    super(ScriptAlert, self).__init__(alert_meta, alert_source_meta, config)
 
-    self.config = config
     self.path = None
     self.stacks_dir = None
     self.common_services_dir = None
@@ -50,6 +51,9 @@ class ScriptAlert(BaseAlert):
     self.path_to_script = None
     self.parameters = {}
 
+    # will force a kinit even if klist says there are valid tickets (4 hour default)
+    self.kinit_timeout = long(config.get('agent', 'alert_kinit_timeout', BaseAlert._DEFAULT_KINIT_TIMEOUT))
+
     if 'path' in alert_source_meta:
       self.path = alert_source_meta['path']
 
@@ -75,6 +79,9 @@ class ScriptAlert(BaseAlert):
         parameter_value = parameter['value']
         self.parameters[parameter_name] = parameter_value
 
+    # pass in some basic parameters to the scripts
+    self.parameters[KERBEROS_KINIT_TIMER_PARAMETER] = self.kinit_timeout
+
   def _collect(self):
     cmd_module = self._load_source()
 

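On the consuming side, each alert script receives the timer through its parameters dictionary under KERBEROS_KINIT_TIMER_PARAMETER. A hedged sketch of a script-side read; the execute signature follows Ambari's alert-script convention, and the key string is an assumption since only the symbolic name appears in this diff:

    # Sketch: an alert script picking up the injected kinit timer.
    KERBEROS_KINIT_TIMER_PARAMETER = "kerberos.kinit.timer"  # assumed key value
    DEFAULT_KINIT_TIMER_MS = 14400000

    def execute(configurations={}, parameters={}, host_name=None):
        # tolerate a missing parameter by falling back to the 4-hour default
        kinit_timer_ms = long(parameters.get(KERBEROS_KINIT_TIMER_PARAMETER,
                                             DEFAULT_KINIT_TIMER_MS))
        # a real alert would pass this along as
        # curl_krb_request(..., kinit_timer_ms=kinit_timer_ms)
        return ('OK', ['kinit timer: {0} ms'.format(kinit_timer_ms)])

    print(execute(parameters={KERBEROS_KINIT_TIMER_PARAMETER: 3600000}))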
http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py b/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
index 502526c..3f201c8 100644
--- a/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
+++ b/ambari-agent/src/main/python/ambari_agent/alerts/web_alert.py
@@ -20,13 +20,13 @@ limitations under the License.
 
 import logging
 import time
-import os
 import urllib2
 import ssl
+
 from functools import wraps
 from urllib2 import HTTPError
 
-from  tempfile import gettempdir
+from tempfile import gettempdir
 from alerts.base_alert import BaseAlert
 from collections import namedtuple
 from resource_management.libraries.functions.get_port_from_url import get_port_from_url
@@ -66,7 +66,7 @@ ssl.wrap_socket = sslwrap(ssl.wrap_socket)
 class WebAlert(BaseAlert):
 
   def __init__(self, alert_meta, alert_source_meta, config):
-    super(WebAlert, self).__init__(alert_meta, alert_source_meta)
+    super(WebAlert, self).__init__(alert_meta, alert_source_meta, config)
 
     connection_timeout = DEFAULT_CONNECTION_TIMEOUT
 
@@ -83,7 +83,8 @@ class WebAlert(BaseAlert):
     self.connection_timeout = float(connection_timeout)
     self.curl_connection_timeout = int(connection_timeout)
 
-    self.config = config
+    # will force a kinit even if klist says there are valid tickets (4 hour default)
+    self.kinit_timeout = long(config.get('agent', 'alert_kinit_timeout', BaseAlert._DEFAULT_KINIT_TIMEOUT))
 
 
   def _collect(self):
@@ -194,7 +195,7 @@ class WebAlert(BaseAlert):
 
         response_code, error_msg, time_millis = curl_krb_request(tmp_dir, kerberos_keytab, kerberos_principal, url,
           "web_alert", kerberos_executable_search_paths, True, self.get_name(), smokeuser,
-          connection_timeout=self.curl_connection_timeout)
+          connection_timeout=self.curl_connection_timeout, kinit_timer_ms = self.kinit_timeout)
       else:
         # kerberos is not involved; use urllib2
         response_code, time_millis, error_msg = self._make_web_request_urllib(url)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py b/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
index 9fd426f..f4e7ba1 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestAlertSchedulerHandler.py
@@ -24,9 +24,10 @@ import os
 from ambari_agent.AlertSchedulerHandler import AlertSchedulerHandler
 from ambari_agent.alerts.metric_alert import MetricAlert
 from ambari_agent.alerts.port_alert import PortAlert
-from ambari_agent.alerts.script_alert import ScriptAlert
 from ambari_agent.alerts.web_alert import WebAlert
 
+from AmbariConfig import AmbariConfig
+
 from mock.mock import patch, Mock, MagicMock
 from unittest import TestCase
 
@@ -34,6 +35,9 @@ TEST_PATH = os.path.join('ambari_agent', 'dummy_files')
 
 class TestAlertSchedulerHandler(TestCase):
 
+  def setUp(self):
+    self.config = AmbariConfig()
+
   def test_load_definitions(self):
     scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None)
 
@@ -42,7 +46,7 @@ class TestAlertSchedulerHandler(TestCase):
     self.assertEquals(len(definitions), 1)
 
   def test_json_to_callable_metric(self):
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     json_definition = {
       'source': {
         'type': 'METRIC'
@@ -63,7 +67,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     }
 
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
@@ -79,7 +83,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     }
 
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is not None)
@@ -94,7 +98,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     }
 
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     callable_result = scheduler._AlertSchedulerHandler__json_to_callable('cluster', 'host', copy.deepcopy(json_definition))
 
     self.assertTrue(callable_result is None)
@@ -102,7 +106,7 @@ class TestAlertSchedulerHandler(TestCase):
   def test_execute_alert_noneScheduler(self):
     execution_commands = []
 
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     scheduler._AlertSchedulerHandler__scheduler = None
     alert_mock = Mock()
     scheduler._AlertSchedulerHandler__json_to_callable = Mock(return_value=alert_mock)
@@ -114,7 +118,7 @@ class TestAlertSchedulerHandler(TestCase):
   def test_execute_alert_noneCommands(self):
     execution_commands = None
 
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     alert_mock = Mock()
     scheduler._AlertSchedulerHandler__json_to_callable = Mock(return_value=alert_mock)
 
@@ -125,7 +129,7 @@ class TestAlertSchedulerHandler(TestCase):
   def test_execute_alert_emptyCommands(self):
     execution_commands = []
 
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     alert_mock = Mock()
     scheduler._AlertSchedulerHandler__json_to_callable = Mock(return_value=alert_mock)
 
@@ -144,7 +148,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     ]
 
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     alert_mock = MagicMock()
     alert_mock.collect = Mock()
     alert_mock.set_helpers = Mock()
@@ -159,7 +163,7 @@ class TestAlertSchedulerHandler(TestCase):
     self.assertTrue(alert_mock.collect.called)
 
   def test_load_definitions(self):
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     scheduler._AlertSchedulerHandler__config_maps = {
       'cluster': {}
     }
@@ -170,7 +174,7 @@ class TestAlertSchedulerHandler(TestCase):
     self.assertTrue(isinstance(alert_def, PortAlert))
 
   def test_load_definitions_noFile(self):
-    scheduler = AlertSchedulerHandler('wrong_path', 'wrong_path', 'wrong_path', 'wrong_path', 5, None, None, None)
+    scheduler = AlertSchedulerHandler('wrong_path', 'wrong_path', 'wrong_path', 'wrong_path', None, self.config, None)
     scheduler._AlertSchedulerHandler__config_maps = {
       'cluster': {}
     }
@@ -190,7 +194,7 @@ class TestAlertSchedulerHandler(TestCase):
       }
     ]
 
-    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, 5, None, None, None)
+    scheduler = AlertSchedulerHandler(TEST_PATH, TEST_PATH, TEST_PATH, TEST_PATH, None, self.config, None)
     alert_mock = MagicMock()
     alert_mock.interval = Mock(return_value=5)
     alert_mock.collect = Mock()

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestAlerts.py b/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
index 8344238..bf56703 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestAlerts.py
@@ -41,11 +41,14 @@ from collections import namedtuple
 from mock.mock import MagicMock, patch
 from unittest import TestCase
 
+from AmbariConfig import AmbariConfig
+
 class TestAlerts(TestCase):
 
   def setUp(self):
     # save original open() method for later use
     self.original_open = open
+    self.config = AmbariConfig()
 
   def tearDown(self):
     sys.stdout == sys.__stdout__
@@ -62,8 +65,8 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
 
     ash = AlertSchedulerHandler(test_file_path, test_stack_path,
-      test_common_services_path, test_host_scripts_path, 5, cluster_configuration,
-      None, None)
+      test_common_services_path, test_host_scripts_path, cluster_configuration,
+      self.config, None)
 
     ash.start()
 
@@ -89,7 +92,7 @@ class TestAlerts(TestCase):
       0,2000,336283100000,
       socket.timeout,336283200000]
 
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     self.assertEquals(6, alert.interval())
@@ -134,7 +137,7 @@ class TestAlerts(TestCase):
     self.__update_cluster_configuration(cluster_configuration, {})
 
     rm = RecoveryManager(tempfile.mktemp(), True)
-    alert = RecoveryAlert(definition_json, definition_json['source'], rm)
+    alert = RecoveryAlert(definition_json, definition_json['source'], self.config, rm)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     self.assertEquals(1, alert.interval())
@@ -221,7 +224,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6402.ambari.apache.org")
 
@@ -266,7 +269,7 @@ class TestAlerts(TestCase):
 
     cluster_configuration = self.__get_cluster_configuration()
 
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(AlertCollector(), cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
 
@@ -354,7 +357,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
 
@@ -388,7 +391,7 @@ class TestAlerts(TestCase):
     del definition_json['source']['jmx']['value']
     collector = AlertCollector()
 
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
 
@@ -412,7 +415,7 @@ class TestAlerts(TestCase):
     # indicating that there was no URI and the result is UNKNOWN
     collector = AlertCollector()
     cluster_configuration = self.__get_cluster_configuration()
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     alert.collect()
@@ -428,7 +431,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     alert.collect()
@@ -444,7 +447,7 @@ class TestAlerts(TestCase):
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
     collector = AlertCollector()
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     alert.collect()
@@ -460,7 +463,7 @@ class TestAlerts(TestCase):
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
     collector = AlertCollector()
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     alert.collect()
@@ -477,7 +480,7 @@ class TestAlerts(TestCase):
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
     collector = AlertCollector()
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     alert.collect()
@@ -501,7 +504,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = WebAlert(definition_json, definition_json['source'], None)
+    alert = WebAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     alert.collect()
@@ -515,7 +518,7 @@ class TestAlerts(TestCase):
     # run the alert and check HTTP 500
     wa_make_web_request_mock.return_value = WebResponse(500,1.234,"Internal Server Error")
     collector = AlertCollector()
-    alert = WebAlert(definition_json, definition_json['source'], None)
+    alert = WebAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     alert.collect()
@@ -530,7 +533,7 @@ class TestAlerts(TestCase):
     wa_make_web_request_mock.return_value = WebResponse(0,0,'error message')
      
     collector = AlertCollector()
-    alert = WebAlert(definition_json, definition_json['source'], None)
+    alert = WebAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     alert.collect()
@@ -551,7 +554,7 @@ class TestAlerts(TestCase):
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
     collector = AlertCollector()
-    alert = WebAlert(definition_json, definition_json['source'], None)
+    alert = WebAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
 
@@ -573,8 +576,8 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
 
     ash = AlertSchedulerHandler(test_file_path, test_stack_path,
-      test_common_services_path, test_host_scripts_path, 5, cluster_configuration,
-      None, None)
+      test_common_services_path, test_host_scripts_path, cluster_configuration,
+      self.config, None)
 
     ash.start()
 
@@ -594,7 +597,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
     self.assertEquals(6, alert.interval())
@@ -620,8 +623,8 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
 
     ash = AlertSchedulerHandler(test_file_path, test_stack_path,
-      test_common_services_path, test_host_scripts_path, 5, cluster_configuration,
-      None, None)
+      test_common_services_path, test_host_scripts_path, cluster_configuration,
+      self.config, None)
 
     ash.start()
 
@@ -629,20 +632,20 @@ class TestAlerts(TestCase):
 
     definition_json = self._get_port_alert_definition()
 
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     ash.schedule_definition(alert)
 
     self.assertEquals(2, ash.get_job_count())
 
     definition_json['enabled'] = False
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     ash.schedule_definition(alert)
 
     # verify disabled alert not scheduled
     self.assertEquals(2, ash.get_job_count())
 
     definition_json['enabled'] = True
-    pa = PortAlert(definition_json, definition_json['source'])
+    pa = PortAlert(definition_json, definition_json['source'], self.config)
     ash.schedule_definition(pa)
 
     # verify enabled alert was scheduled
@@ -656,8 +659,8 @@ class TestAlerts(TestCase):
 
     cluster_configuration = self.__get_cluster_configuration()
     ash = AlertSchedulerHandler(test_file_path, test_stack_path,
-      test_common_services_path, test_host_scripts_path, 5, cluster_configuration,
-      None, None)
+      test_common_services_path, test_host_scripts_path, cluster_configuration,
+      self.config, None)
 
     ash.start()
 
@@ -691,7 +694,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = ScriptAlert(definition_json, definition_json['source'], None)
+    alert = ScriptAlert(definition_json, definition_json['source'], self.config)
 
     # instruct the test alert script to be skipped
     alert.set_helpers(collector, cluster_configuration )
@@ -709,32 +712,32 @@ class TestAlerts(TestCase):
   def test_default_reporting_text(self):
     definition_json = self._get_script_alert_definition()
 
-    alert = ScriptAlert(definition_json, definition_json['source'], None)
+    alert = ScriptAlert(definition_json, definition_json['source'], self.config)
     self.assertEquals(alert._get_reporting_text(alert.RESULT_OK), '{0}')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_WARNING), '{0}')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_CRITICAL), '{0}')
 
     definition_json['source']['type'] = 'PORT'
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     self.assertEquals(alert._get_reporting_text(alert.RESULT_OK), 'TCP OK - {0:.4f} response on port {1}')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_WARNING), 'TCP OK - {0:.4f} response on port {1}')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_CRITICAL), 'Connection failed: {0} to {1}:{2}')
 
     definition_json['source']['type'] = 'WEB'
-    alert = WebAlert(definition_json, definition_json['source'], None)
+    alert = WebAlert(definition_json, definition_json['source'], self.config)
     self.assertEquals(alert._get_reporting_text(alert.RESULT_OK), 'HTTP {0} response in {2:.4f} seconds')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_WARNING), 'HTTP {0} response in {2:.4f} seconds')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_CRITICAL), 'Connection failed to {1}')
 
     definition_json['source']['type'] = 'METRIC'
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     self.assertEquals(alert._get_reporting_text(alert.RESULT_OK), '{0}')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_WARNING), '{0}')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_CRITICAL), '{0}')
 
     rm = RecoveryManager(tempfile.mktemp())
     definition_json['source']['type'] = 'RECOVERY'
-    alert = RecoveryAlert(definition_json, definition_json['source'], rm)
+    alert = RecoveryAlert(definition_json, definition_json['source'], self.config, rm)
     self.assertEquals(alert._get_reporting_text(alert.RESULT_OK), 'No recovery operations executed for {2}{0}.')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_WARNING), '{1} recovery operations executed for {2}{0}.')
     self.assertEquals(alert._get_reporting_text(alert.RESULT_CRITICAL), '{1} recovery operations executed for {2}{0}.')
@@ -973,7 +976,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
 
@@ -1065,13 +1068,13 @@ class TestAlerts(TestCase):
   def test_uri_timeout(self):
     # the web alert will have a timeout value
     definition_json = self._get_web_alert_definition()
-    alert = WebAlert(definition_json, definition_json['source'], None)
+    alert = WebAlert(definition_json, definition_json['source'], self.config)
     self.assertEquals(5.678, alert.connection_timeout)
     self.assertEquals(5, alert.curl_connection_timeout)
 
     # the metric definition will not and should default to 5.0
     definition_json = self._get_metric_alert_definition()
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     self.assertEquals(5.0, alert.connection_timeout)
 
 
@@ -1129,7 +1132,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = MetricAlert(definition_json, definition_json['source'], None)
+    alert = MetricAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6401.ambari.apache.org")
 
@@ -1162,7 +1165,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6402.ambari.apache.org")
 
@@ -1200,7 +1203,7 @@ class TestAlerts(TestCase):
     cluster_configuration = self.__get_cluster_configuration()
     self.__update_cluster_configuration(cluster_configuration, configuration)
 
-    alert = PortAlert(definition_json, definition_json['source'])
+    alert = PortAlert(definition_json, definition_json['source'], self.config)
     alert.set_helpers(collector, cluster_configuration)
     alert.set_cluster("c1", "c6402.ambari.apache.org")
 
@@ -1523,7 +1526,7 @@ class MockAlert(BaseAlert):
   Mock class for testing
   """
   def __init__(self):
-    super(MockAlert, self).__init__(None, None)
+    super(MockAlert, self).__init__(None, None, AmbariConfig())
 
   def get_name(self):
     return "mock_alert"

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/test/python/ambari_agent/TestBaseAlert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestBaseAlert.py b/ambari-agent/src/test/python/ambari_agent/TestBaseAlert.py
index e67c894..62877f2 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestBaseAlert.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestBaseAlert.py
@@ -20,16 +20,18 @@ limitations under the License.
 
 from unittest import TestCase
 from alerts.base_alert import BaseAlert
-
-alert = BaseAlert({}, {})
+from AmbariConfig import AmbariConfig
 
 class TestBaseAlert(TestCase):
 
+  def setUp(self):
+    self.config = AmbariConfig()
+
   def test_interval_noData(self):
     alert_meta = {}
     alert_source_meta = {}
 
-    alert = BaseAlert(alert_meta, alert_source_meta)
+    alert = BaseAlert(alert_meta, alert_source_meta, self.config)
     interval = alert.interval()
     self.assertEquals(interval, 1)
 
@@ -37,7 +39,7 @@ class TestBaseAlert(TestCase):
     alert_meta = {'interval': 0}
     alert_source_meta = {}
 
-    alert = BaseAlert(alert_meta, alert_source_meta)
+    alert = BaseAlert(alert_meta, alert_source_meta, self.config)
     interval = alert.interval()
     self.assertEquals(interval, 1)
 
@@ -45,7 +47,7 @@ class TestBaseAlert(TestCase):
     alert_meta = {'interval': 5}
     alert_source_meta = {}
 
-    alert = BaseAlert(alert_meta, alert_source_meta)
+    alert = BaseAlert(alert_meta, alert_source_meta, self.config)
     interval = alert.interval()
     self.assertEquals(interval, 5)
 
@@ -53,7 +55,7 @@ class TestBaseAlert(TestCase):
     alert_meta = {'enabled': 'true'}
     alert_source_meta = {}
 
-    alert = BaseAlert(alert_meta, alert_source_meta)
+    alert = BaseAlert(alert_meta, alert_source_meta, self.config)
     enabled = alert.is_enabled()
     self.assertEquals(enabled, 'true')
 
@@ -61,7 +63,7 @@ class TestBaseAlert(TestCase):
     alert_meta = {'name': 'ambari'}
     alert_source_meta = {}
 
-    alert = BaseAlert(alert_meta, alert_source_meta)
+    alert = BaseAlert(alert_meta, alert_source_meta, self.config)
     name = alert.get_name()
     self.assertEquals(name, 'ambari')
 
@@ -69,7 +71,7 @@ class TestBaseAlert(TestCase):
     alert_meta = {'uuid': '123'}
     alert_source_meta = {}
 
-    alert = BaseAlert(alert_meta, alert_source_meta)
+    alert = BaseAlert(alert_meta, alert_source_meta, self.config)
     uuid = alert.get_uuid()
     self.assertEquals(uuid, '123')
 
@@ -79,7 +81,7 @@ class TestBaseAlert(TestCase):
     cluster = 'cluster'
     host = 'host'
 
-    alert = BaseAlert(alert_meta, alert_source_meta)
+    alert = BaseAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
     self.assertEquals(alert.cluster_name, cluster)
     self.assertEquals(alert.host_name, host)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/test/python/ambari_agent/TestMetricAlert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestMetricAlert.py b/ambari-agent/src/test/python/ambari_agent/TestMetricAlert.py
index 23e9f13..9dfb8e9 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestMetricAlert.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestMetricAlert.py
@@ -21,10 +21,13 @@ limitations under the License.
 from unittest import TestCase
 from alerts.metric_alert import MetricAlert
 from mock.mock import Mock, MagicMock, patch
-import os
+from AmbariConfig import AmbariConfig
 
 class TestMetricAlert(TestCase):
 
+  def setUp(self):
+    self.config = AmbariConfig()
+
   @patch("urllib2.urlopen")
   def test_collect(self, urllib):
     alert_meta = {
@@ -81,7 +84,7 @@ class TestMetricAlert(TestCase):
     mock_collector = MagicMock()
     mock_collector.put = Mock(side_effect=collector_side_effect)
 
-    alert = MetricAlert(alert_meta, alert_source_meta)
+    alert = MetricAlert(alert_meta, alert_source_meta, self.config)
     alert.set_helpers(mock_collector, {'foo-site/bar': 12, 'foo-site/baz': 'asd'})
     alert.set_cluster(cluster, host)
 
@@ -143,7 +146,7 @@ class TestMetricAlert(TestCase):
     mock_collector = MagicMock()
     mock_collector.put = Mock(side_effect=collector_side_effect)
 
-    alert = MetricAlert(alert_meta, alert_source_meta)
+    alert = MetricAlert(alert_meta, alert_source_meta, self.config)
     alert.set_helpers(mock_collector, {'foo-site/bar': 12, 'foo-site/baz': 'asd'})
     alert.set_cluster(cluster, host)
 
@@ -204,7 +207,7 @@ class TestMetricAlert(TestCase):
     mock_collector = MagicMock()
     mock_collector.put = Mock(side_effect=collector_side_effect)
 
-    alert = MetricAlert(alert_meta, alert_source_meta, None)
+    alert = MetricAlert(alert_meta, alert_source_meta, self.config)
     alert.set_helpers(mock_collector, {'foo-site/bar': 12, 'foo-site/baz': 'asd'})
     alert.set_cluster(cluster, host)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/test/python/ambari_agent/TestPortAlert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestPortAlert.py b/ambari-agent/src/test/python/ambari_agent/TestPortAlert.py
index 195cc63..dffa56c 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestPortAlert.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestPortAlert.py
@@ -21,9 +21,13 @@ limitations under the License.
 from unittest import TestCase
 from alerts.port_alert import PortAlert
 from mock.mock import Mock, MagicMock, patch
+from AmbariConfig import AmbariConfig
 
 class TestPortAlert(TestCase):
 
+  def setUp(self):
+    self.config = AmbariConfig()
+
   @patch("socket.socket")
   @patch("time.time")
   def test_collect_defaultPort(self, time, socket):
@@ -44,7 +48,7 @@ class TestPortAlert(TestCase):
     expected_state = 'OK'
     expected_text = 'TCP OK - 0.2010 response on port 80'
     time.side_effect = [123, 324, 567]
-    alert = PortAlert(alert_meta, alert_source_meta)
+    alert = PortAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
 
     def collector_side_effect(clus, data):
@@ -84,7 +88,7 @@ class TestPortAlert(TestCase):
     expected_state = 'WARNING'
     expected_text = 'TCP OK - 3.1170 response on port 8080'
     time.side_effect = [123, 3240, 567]
-    alert = PortAlert(alert_meta, alert_source_meta)
+    alert = PortAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
 
     def collector_side_effect(clus, data):
@@ -124,7 +128,7 @@ class TestPortAlert(TestCase):
     expected_state = 'CRITICAL'
     expected_text = 'Connection failed: Socket Timeout to 192.168.0.1:8080'
     time.side_effect = [123, 5240, 567]
-    alert = PortAlert(alert_meta, alert_source_meta)
+    alert = PortAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
 
     def collector_side_effect(clus, data):
@@ -163,7 +167,7 @@ class TestPortAlert(TestCase):
     expected_state = 'CRITICAL'
     expected_text = 'Connection failed: Socket Timeout to host1:80'
     time.side_effect = [123, 5240, 567]
-    alert = PortAlert(alert_meta, alert_source_meta)
+    alert = PortAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
 
     def collector_side_effect(clus, data):
@@ -204,7 +208,7 @@ class TestPortAlert(TestCase):
     expected_text = 'Connection failed: exception message to 192.168.0.1:8080'
     time.side_effect = [123, 345, 567]
     socket.side_effect = Exception('exception message')
-    alert = PortAlert(alert_meta, alert_source_meta)
+    alert = PortAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
 
     def collector_side_effect(clus, data):
@@ -249,7 +253,7 @@ class TestPortAlert(TestCase):
     expected_state = 'OK'
     expected_text = 'TCP OK - 3.1170 response on port 8080'
     time.side_effect = [123, 3240, 567]
-    alert = PortAlert(alert_meta, alert_source_meta)
+    alert = PortAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
 
     def collector_side_effect(clus, data):
@@ -294,7 +298,7 @@ class TestPortAlert(TestCase):
     expected_state = 'CRITICAL'
     expected_text = 'Connection failed: Socket Timeout to 192.168.0.1:8080'
     time.side_effect = [123, 3240, 567]
-    alert = PortAlert(alert_meta, alert_source_meta)
+    alert = PortAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
 
     def collector_side_effect(clus, data):
@@ -339,7 +343,7 @@ class TestPortAlert(TestCase):
     expected_state = 'CRITICAL'
     expected_text = 'Connection failed: Socket Timeout to 192.168.0.1:8080'
     time.side_effect = [120, 123, 5240, 567]
-    alert = PortAlert(alert_meta, alert_source_meta)
+    alert = PortAlert(alert_meta, alert_source_meta, self.config)
     alert.set_cluster(cluster, host)
 
     def collector_side_effect(clus, data):

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-agent/src/test/python/ambari_agent/TestScriptAlert.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/test/python/ambari_agent/TestScriptAlert.py b/ambari-agent/src/test/python/ambari_agent/TestScriptAlert.py
index 46c7651..a56258b 100644
--- a/ambari-agent/src/test/python/ambari_agent/TestScriptAlert.py
+++ b/ambari-agent/src/test/python/ambari_agent/TestScriptAlert.py
@@ -23,10 +23,15 @@ from alerts.script_alert import ScriptAlert
 from mock.mock import Mock, MagicMock, patch
 import os
 
+from AmbariConfig import AmbariConfig
+
 DUMMY_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dummy_files')
 
 class TestScriptAlert(TestCase):
 
+  def setUp(self):
+    self.config = AmbariConfig()
+
   def test_collect(self):
     alert_meta = {
       'name': 'alert1',
@@ -59,7 +64,7 @@ class TestScriptAlert(TestCase):
     mock_collector = MagicMock()
     mock_collector.put = Mock(side_effect=collector_side_effect)
 
-    alert = ScriptAlert(alert_meta, alert_source_meta, {})
+    alert = ScriptAlert(alert_meta, alert_source_meta, self.config)
     alert.set_helpers(mock_collector, {'foo-site/bar': 12, 'foo-site/baz': 'asd'})
     alert.set_cluster(cluster, host)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py b/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
index 1ccc45f..21cdd09 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/curl_krb_request.py
@@ -24,7 +24,6 @@ __all__ = ["curl_krb_request"]
 import logging
 import os
 import time
-import threading
 
 from resource_management.core import global_lock
 from resource_management.core import shell
@@ -49,12 +48,52 @@ MAX_TIMEOUT_DEFAULT = CONNECTION_TIMEOUT_DEFAULT + 2
 
 logger = logging.getLogger()
 
+# a dictionary of the last time that a kinit was performed for a specific cache
+# dictionaries are inherently thread-safe in Python via the Global Interpreter Lock
+# https://docs.python.org/2/glossary.html#term-global-interpreter-lock
+_KINIT_CACHE_TIMES = {}
+
+# the default time between forced kinit calls (4 hours)
+DEFAULT_KERBEROS_KINIT_TIMER_MS = 14400000
+
+# a parameter which can be used to pass around the above timeout value
+KERBEROS_KINIT_TIMER_PARAMETER = "kerberos.kinit.timer"
+
 def curl_krb_request(tmp_dir, keytab, principal, url, cache_file_prefix,
-    krb_exec_search_paths, return_only_http_code, alert_name, user,
-    connection_timeout = CONNECTION_TIMEOUT_DEFAULT):
+    krb_exec_search_paths, return_only_http_code, caller_label, user,
+    connection_timeout = CONNECTION_TIMEOUT_DEFAULT,
+    kinit_timer_ms=DEFAULT_KERBEROS_KINIT_TIMER_MS):
+  """
+  Makes a curl request using the kerberos credentials stored in a calculated cache file. The
+  cache file is created by combining the supplied principal, keytab, user, and request name into
+  a unique hash.
+
+  This function will use the klist command to determine if the cache is expired and will perform
+  a kinit if necessary. Additionally, it has an internal timer to force a kinit after a
+  configurable amount of time. This is to prevent boundary issues where requests hit the edge
+  of a ticket's lifetime.
+
+  :param tmp_dir: the directory to use for storing the local kerberos cache for this request.
+  :param keytab: the location of the keytab to use when performing a kinit
+  :param principal: the principal to use when performing a kinit
+  :param url: the URL to request
+  :param cache_file_prefix: an identifier used to build the cache file name for this request;
+                            requests that share a prefix can reuse the same cache.
+  :param krb_exec_search_paths: the search path to use for invoking kerberos binaries
+  :param return_only_http_code: True to return only the HTTP code, False to return GET content
+  :param caller_label: an identifier to give context into the caller of this module (used for logging)
+  :param user: the user to invoke the curl command as
+  :param connection_timeout: if specified, a connection timeout for curl (default 10 seconds)
+  :param kinit_timer_ms: if specified, the time (in ms) to wait before forcing a kinit,
+                         even if the klist cache is still valid.
+  :return: a tuple of (response, error message, request time in milliseconds); the response
+           is the HTTP status code when return_only_http_code is True, otherwise the body
+  """
 
   import uuid
 
+  # assume no kinit is required until the checks below say otherwise
+  is_kinit_required = False
+
   # Create the kerberos credentials cache (ccache) file and set it in the environment to use
   # when executing curl. Use the md5 hash of the combination of the principal and keytab file
   # to generate a (relatively) unique cache filename so that we can use it as needed. Scope
@@ -75,19 +114,41 @@ def curl_krb_request(tmp_dir, keytab, principal, url, cache_file_prefix,
     else:
       klist_path_local = get_klist_path()
 
-    if shell.call("{0} -s {1}".format(klist_path_local, ccache_file_path), user=user)[0] != 0:
+    # take a look at the last time kinit was run for the specified cache and force a new
+    # kinit if it's time; this helps to avoid problems when approaching the ticket boundary
+    # while executing a klist and then a curl
+    last_kinit_time = _KINIT_CACHE_TIMES.get(ccache_file_name, 0)
+    current_time = long(time.time() * 1000)  # milliseconds, to match kinit_timer_ms
+    if current_time - kinit_timer_ms > last_kinit_time:
+      is_kinit_required = True
+
+    # if the time has not expired, double-check that the cache still has a valid ticket
+    if not is_kinit_required:
+      klist_command = "{0} -s {1}".format(klist_path_local, ccache_file_path)
+      is_kinit_required = (shell.call(klist_command, user=user)[0] != 0)
+
+    # if kinit is required, then perform the kinit
+    if is_kinit_required:
       if krb_exec_search_paths:
         kinit_path_local = get_kinit_path(krb_exec_search_paths)
       else:
         kinit_path_local = get_kinit_path()
 
-      logger.debug("[Alert][{0}] Enabling Kerberos authentication via GSSAPI using ccache at {1}.".format(
-        alert_name, ccache_file_path))
+      logger.debug("Enabling Kerberos authentication for %s via GSSAPI using ccache at %s",
+        caller_label, ccache_file_path)
+
+      # kinit; there's no need to set a ticket timeout as this will use the default invalidation
+      # configured in the krb5.conf - regenerating keytabs will not prevent an existing cache
+      # from working correctly
+      shell.checked_call("{0} -c {1} -kt {2} {3} > /dev/null".format(kinit_path_local,
+        ccache_file_path, keytab, principal), user=user)
 
-      shell.checked_call("{0} -l 5m -c {1} -kt {2} {3} > /dev/null".format(kinit_path_local, ccache_file_path, keytab, principal), user=user)
+      # record kinit time
+      _KINIT_CACHE_TIMES[ccache_file_name] = current_time
     else:
-      logger.debug("[Alert][{0}] Kerberos authentication via GSSAPI already enabled using ccache at {1}.".format(
-        alert_name, ccache_file_path))
+      # no kinit needed, use the cache
+      logger.debug("Kerberos authentication for %s via GSSAPI already enabled using ccache at %s.",
+        caller_label, ccache_file_path)
   finally:
     kinit_lock.release()
 
@@ -119,7 +180,7 @@ def curl_krb_request(tmp_dir, keytab, principal, url, cache_file_prefix,
                              user=user, env=kerberos_env)
   except Fail:
     if logger.isEnabledFor(logging.DEBUG):
-      logger.exception("[Alert][{0}] Unable to make a web request.".format(alert_name))
+      logger.exception("Unable to make a curl request for {0}.".format(caller_label))
     raise
   finally:
     if os.path.isfile(cookie_file):
@@ -138,6 +199,7 @@ def curl_krb_request(tmp_dir, keytab, principal, url, cache_file_prefix,
     else:
       return (curl_stdout, error_msg, time_millis)
 
-  logger.debug("[Alert][{0}] Curl response is empty! Please take a look at error message: ".
-               format(alert_name, str(error_msg)))
+  logger.debug("The curl response for %s is empty; standard error = %s",
+    caller_label, str(error_msg))
+
   return ("", error_msg, time_millis)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_checkpoint_time.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_checkpoint_time.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_checkpoint_time.py
index ef389cd..71e34e6 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_checkpoint_time.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_checkpoint_time.py
@@ -25,6 +25,8 @@ import logging
 import traceback
 
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
 from resource_management.core.environment import Environment
 
 LABEL = 'Last Checkpoint: [{h} hours, {m} minutes, {tx} transactions]'
@@ -133,6 +135,8 @@ def execute(configurations={}, parameters={}, host_name=None):
   if PERCENT_CRITICAL_KEY in parameters:
     percent_critical = float(parameters[PERCENT_CRITICAL_KEY]) * 100
 
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
   # determine the right URI and whether to use SSL
   uri = http_uri
   if http_policy == 'HTTPS_ONLY':
@@ -159,14 +163,16 @@ def execute(configurations={}, parameters={}, host_name=None):
 
       last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
         kerberos_principal, last_checkpoint_time_qry,"checkpoint_time_alert", executable_paths, False,
-        "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout)
+        "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms=kinit_timer_ms)
 
       last_checkpoint_time_response_json = json.loads(last_checkpoint_time_response)
       last_checkpoint_time = int(last_checkpoint_time_response_json["beans"][0]["LastCheckpointTime"])
 
       journal_transaction_info_response, error_msg, time_millis = curl_krb_request(env.tmp_dir, kerberos_keytab,
         kerberos_principal, journal_transaction_info_qry,"checkpoint_time_alert", executable_paths,
-        False, "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout)
+        False, "NameNode Last Checkpoint", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms=kinit_timer_ms)
 
       journal_transaction_info_response_json = json.loads(journal_transaction_info_response)
       journal_transaction_info = journal_transaction_info_response_json["beans"][0]["JournalTransactionInfo"]
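
Each of the alert scripts in this commit repeats the same plumbing: read the optional
timer from the alert's parameters dictionary, falling back to the 4-hour default, and
thread it through to curl_krb_request. A condensed sketch of that pattern (the keytab,
principal, and URL below are placeholders, not values from this commit):

    from resource_management.core.environment import Environment
    from resource_management.libraries.functions.curl_krb_request import curl_krb_request
    from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
    from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER

    def execute(configurations={}, parameters={}, host_name=None):
        # use the server-supplied timer when present, otherwise the 4-hour default
        kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER,
                                        DEFAULT_KERBEROS_KINIT_TIMER_MS)

        env = Environment.get_instance()
        response, error_msg, time_millis = curl_krb_request(
            env.tmp_dir,
            '/etc/security/keytabs/smokeuser.headless.keytab',  # placeholder keytab
            'ambari-qa@EXAMPLE.COM',                            # placeholder principal
            'http://namenode.example.com:50070/jmx',            # placeholder URL
            'example_alert', None, False, 'Example Alert', 'ambari-qa',
            connection_timeout=5, kinit_timer_ms=kinit_timer_ms)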

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
index a174cb4..70b1970 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_ha_namenode_health.py
@@ -21,9 +21,10 @@ limitations under the License.
 import urllib2
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import logging
-import traceback
 
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
 from resource_management.core.environment import Environment
 
 RESULT_STATE_OK = 'OK'
@@ -110,6 +111,7 @@ def execute(configurations={}, parameters={}, host_name=None):
     kerberos_principal = configurations[KERBEROS_PRINCIPAL]
     kerberos_principal = kerberos_principal.replace('_HOST', host_name)
 
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
 
   # determine whether or not SSL is enabled
   is_ssl_enabled = False
@@ -165,7 +167,8 @@ def execute(configurations={}, parameters={}, host_name=None):
 
           state_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir,
             kerberos_keytab, kerberos_principal, jmx_uri,"ha_nn_health", executable_paths, False,
-            "NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout)
+            "NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout,
+            kinit_timer_ms=kinit_timer_ms)
 
           state = _get_ha_state_from_json(state_response)
         else:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
index 217f3b8..f6a9a56 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_metrics_deviation.py
@@ -25,7 +25,12 @@ from math import sqrt
 import urllib
 import time
 import urllib2
-from resource_management import Environment, curl_krb_request
+from resource_management import Environment
+
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+
 
 RESULT_STATE_OK = 'OK'
 RESULT_STATE_CRITICAL = 'CRITICAL'
@@ -178,6 +183,8 @@ def execute(configurations={}, parameters={}, host_name=None):
       if dfs_policy == "HTTPS_ONLY":
         is_ssl_enabled = True
 
+    kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
     name_service = configurations[NAMESERVICE_KEY]
     hdfs_site = configurations[HDFS_SITE_KEY]
 
@@ -215,9 +222,10 @@ def execute(configurations={}, parameters={}, host_name=None):
 
             # curl requires an integer timeout
             curl_connection_timeout = int(connection_timeout)
-            state_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir,
-                                                                       kerberos_keytab, kerberos_principal, jmx_uri,"ha_nn_health", executable_paths, False,
-                                                                       "NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout)
+            state_response, error_msg, time_millis = curl_krb_request(env.tmp_dir,
+              kerberos_keytab, kerberos_principal, jmx_uri,"ha_nn_health", executable_paths, False,
+              "NameNode High Availability Health", smokeuser, connection_timeout=curl_connection_timeout,
+              kinit_timer_ms=kinit_timer_ms)
 
             state = _get_ha_state_from_json(state_response)
           else:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_upgrade_finalized.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_upgrade_finalized.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_upgrade_finalized.py
index 6e8945c..fbda22e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_upgrade_finalized.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/alerts/alert_upgrade_finalized.py
@@ -23,7 +23,10 @@ import ambari_simplejson as json # simplejson is much faster comparing to Python
 import logging
 import traceback
 
-from resource_management.libraries.functions.curl_krb_request import curl_krb_request, CONNECTION_TIMEOUT_DEFAULT
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+from resource_management.libraries.functions.curl_krb_request import CONNECTION_TIMEOUT_DEFAULT
 from resource_management.core.environment import Environment
 
 NN_HTTP_ADDRESS_KEY = '{{hdfs-site/dfs.namenode.http-address}}'
@@ -100,6 +103,8 @@ def execute(configurations={}, parameters={}, host_name=None):
     kerberos_principal = configurations[KERBEROS_PRINCIPAL]
     kerberos_principal = kerberos_principal.replace('_HOST', host_name)
 
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
   # determine the right URI and whether to use SSL
   uri = http_uri
   if http_policy == 'HTTPS_ONLY':
@@ -121,7 +126,7 @@ def execute(configurations={}, parameters={}, host_name=None):
       last_checkpoint_time_response, error_msg, time_millis = curl_krb_request(
         env.tmp_dir, kerberos_keytab,
         kerberos_principal, upgrade_finalized_qry, "upgrade_finalized_state", executable_paths, False,
-        "HDFS Upgrade Finalized State", smokeuser
+        "HDFS Upgrade Finalized State", smokeuser, kinit_timer_ms = kinit_timer_ms
        )
 
       upgrade_finalized_response_json = json.loads(last_checkpoint_time_response)

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_webhcat_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_webhcat_server.py
index b49fd6e..c9575c0 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_webhcat_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/alerts/alert_webhcat_server.py
@@ -27,6 +27,9 @@ import logging
 
 from resource_management.core.environment import Environment
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
+
 
 RESULT_CODE_OK = "OK"
 RESULT_CODE_CRITICAL = "CRITICAL"
@@ -148,11 +151,13 @@ def execute(configurations={}, parameters={}, host_name=None):
       if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
         kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
 
+      kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
       env = Environment.get_instance()
       stdout, stderr, time_millis = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
-                                                      query_url, "webhcat_alert_cc_", kerberos_executable_search_paths, True,
-                                                      "WebHCat Server Status", smokeuser,
-                                                      connection_timeout=curl_connection_timeout)
+        query_url, "webhcat_alert_cc_", kerberos_executable_search_paths, True,
+        "WebHCat Server Status", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms=kinit_timer_ms)
 
       # check the response code
       response_code = int(stdout)
@@ -169,9 +174,10 @@ def execute(configurations={}, parameters={}, host_name=None):
 
       # now that we have the http status and it was 200, get the content
       stdout, stderr, total_time = curl_krb_request(env.tmp_dir, smokeuser_keytab, smokeuser_principal,
-                                                      query_url, "webhcat_alert_cc_", kerberos_executable_search_paths,
-                                                      False, "WebHCat Server Status", smokeuser,
-                                                      connection_timeout=curl_connection_timeout)
+        query_url, "webhcat_alert_cc_", kerberos_executable_search_paths,
+        False, "WebHCat Server Status", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms=kinit_timer_ms)
+
       json_response = json.loads(stdout)
     except:
       return (RESULT_CODE_CRITICAL, [traceback.format_exc()])
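
The WebHCat alert issues the same request twice: once with return_only_http_code=True
to validate the status, then again to fetch the body. A sketch of that two-phase check
(the helper function and its arguments are illustrative; the curl_krb_request argument
order matches the hunk above):

    import ambari_simplejson as json
    from resource_management.libraries.functions.curl_krb_request import curl_krb_request

    def fetch_webhcat_status(tmp_dir, keytab, principal, query_url, user, kinit_timer_ms):
        # phase 1: status code only (return_only_http_code=True)
        code_str, error_msg, time_millis = curl_krb_request(
            tmp_dir, keytab, principal, query_url, "webhcat_alert_cc_",
            None, True, "WebHCat Server Status", user,
            kinit_timer_ms=kinit_timer_ms)
        if int(code_str) != 200:
            return None  # the caller reports CRITICAL with the bad status code

        # phase 2: the same URL again, this time returning the response body
        stdout, error_msg, total_time = curl_krb_request(
            tmp_dir, keytab, principal, query_url, "webhcat_alert_cc_",
            None, False, "WebHCat Server Status", user,
            kinit_timer_ms=kinit_timer_ms)
        return json.loads(stdout)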

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py
index ef5e6b3..2105bed 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanager_health.py
@@ -26,6 +26,8 @@ import traceback
 from ambari_commons import OSCheck
 from ambari_commons.inet_utils import resolve_address
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
 from resource_management.core.environment import Environment
 
 RESULT_CODE_OK = 'OK'
@@ -155,9 +157,11 @@ def execute(configurations={}, parameters={}, host_name=None):
       # curl requires an integer timeout
       curl_connection_timeout = int(connection_timeout)
 
+      kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
       url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
         query, "nm_health_alert", executable_paths, False, "NodeManager Health", smokeuser,
-        connection_timeout=curl_connection_timeout)
+        connection_timeout=curl_connection_timeout, kinit_timer_ms=kinit_timer_ms)
 
       json_response = json.loads(url_response)
     else:

http://git-wip-us.apache.org/repos/asf/ambari/blob/2efe8945/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py
index 119a1a1..adf27ec 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/alerts/alert_nodemanagers_summary.py
@@ -25,6 +25,8 @@ import traceback
 
 from ambari_commons.urllib_handlers import RefreshHeaderProcessor
 from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.libraries.functions.curl_krb_request import DEFAULT_KERBEROS_KINIT_TIMER_MS
+from resource_management.libraries.functions.curl_krb_request import KERBEROS_KINIT_TIMER_PARAMETER
 from resource_management.core.environment import Environment
 
 ERROR_LABEL = '{0} NodeManager{1} {2} unhealthy.'
@@ -109,6 +111,8 @@ def execute(configurations={}, parameters={}, host_name=None):
   if CONNECTION_TIMEOUT_KEY in parameters:
     connection_timeout = float(parameters[CONNECTION_TIMEOUT_KEY])
 
+  kinit_timer_ms = parameters.get(KERBEROS_KINIT_TIMER_PARAMETER, DEFAULT_KERBEROS_KINIT_TIMER_MS)
+
   # determine the right URI and whether to use SSL
   uri = http_uri
   if http_policy == 'HTTPS_ONLY':
@@ -130,7 +134,8 @@ def execute(configurations={}, parameters={}, host_name=None):
 
       url_response, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
         live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, False,
-        "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout)
+        "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
+        kinit_timer_ms=kinit_timer_ms)
 
       try:
         url_response_json = json.loads(url_response)
@@ -143,7 +148,8 @@ def execute(configurations={}, parameters={}, host_name=None):
       if convert_to_json_failed:
         response_code, error_msg, time_millis  = curl_krb_request(env.tmp_dir, kerberos_keytab, kerberos_principal,
           live_nodemanagers_qry, "nm_health_summary_alert", executable_paths, True,
-          "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout)
+          "NodeManager Health Summary", smokeuser, connection_timeout=curl_connection_timeout,
+          kinit_timer_ms=kinit_timer_ms)
     else:
       live_nodemanagers = json.loads(get_value_from_jmx(live_nodemanagers_qry,
       "LiveNodeManagers", connection_timeout))


[04/51] [abbrv] ambari git commit: AMBARI-15292: Text update for the Hive Server 2 port switch manual step in rolling upgrade (dili)

Posted by jl...@apache.org.
AMBARI-15292: Text update for the Hive Server 2 port switch manual step in rolling upgrade (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/3dd4c4a4
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/3dd4c4a4
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/3dd4c4a4

Branch: refs/heads/AMBARI-13364
Commit: 3dd4c4a4686318e8c7c55f81ff3c8a9bed518c1d
Parents: 03eb1c5
Author: Di Li <di...@apache.org>
Authored: Mon Mar 7 16:38:04 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Mon Mar 7 16:38:04 2016 -0500

----------------------------------------------------------------------
 .../src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml   | 4 ++--
 .../src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml   | 4 ++--
 .../src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml   | 4 ++--
 .../src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml   | 4 ++--
 .../src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml   | 4 ++--
 .../src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml   | 4 ++--
 6 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/3dd4c4a4/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
index 953a731..d23c992 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
@@ -538,7 +538,7 @@
         <pre-upgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10010 if hive is using a binary transfer mode or 10011 if hive is using an http transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10010 if Hive is using a binary transport mode or 10011 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_2_0_0_hive_server_set_transport_mode"/>
@@ -547,7 +547,7 @@
         <pre-downgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10000 if hive is using a binary transfer mode or 10001 if hive is using an http transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10000 if Hive is using a binary transport mode or 10001 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_2_0_0_hive_server_restore_transport_mode_on_downgrade"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3dd4c4a4/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
index 018fb73..78bee7e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.3.xml
@@ -683,7 +683,7 @@
         <pre-upgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10010 if hive is using a binary transfer mode or 10011 if hive is using an http transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10010 if Hive is using a binary transport mode or 10011 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_set_transport_mode"/>
@@ -699,7 +699,7 @@
         <pre-downgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10000 if hive is using a binary transfer mode or 10001 if hive is using an http transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10000 if Hive is using a binary transport mode or 10001 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_restore_transport_mode_on_downgrade"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3dd4c4a4/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
index 90e7f62..b1599ea 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.4.xml
@@ -695,7 +695,7 @@
         <pre-upgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10010 if hive is using a binary transfer mode or 10011 if hive is using an http transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10010 if Hive is using a binary transport mode or 10011 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_set_transport_mode"/>
@@ -711,7 +711,7 @@
         <pre-downgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10000 if hive is using a binary transfer mode or 10001 if hive is using an http transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10000 if Hive is using a binary transport mode or 10001 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_restore_transport_mode_on_downgrade"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3dd4c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
index 64d0e8e..ed30846 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.3.xml
@@ -683,7 +683,7 @@
         <pre-upgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10010 if hive is using a binary transfer mode or 10011 if hive is using an http transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10010 if Hive is using a binary transport mode or 10011 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_set_transport_mode"/>
@@ -692,7 +692,7 @@
         <pre-downgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10000 if hive is using a binary transfer mode or 10001 if hive is using an http transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10000 if Hive is using a binary transport mode or 10001 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_3_0_0_hive_server_restore_transport_mode_on_downgrade"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3dd4c4a4/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
index 5bf3532..5fa0966 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3/upgrades/upgrade-2.4.xml
@@ -666,7 +666,7 @@
         <pre-upgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10010 if hive is using a binary transfer mode or 10011 if hive is using an http transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10010 if Hive is using a binary transport mode or 10011 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_4_0_0_hive_server_set_transport_mode"/>
@@ -676,7 +676,7 @@
         <pre-downgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10000 if hive is using a binary transfer mode or 10001 if hive is using an http transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10000 if Hive is using a binary transport mode or 10001 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_4_0_0_hive_server_restore_transport_mode_on_downgrade"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3dd4c4a4/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
index ac2ab03..4ad5d6d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/upgrades/upgrade-2.4.xml
@@ -676,7 +676,7 @@
         <pre-upgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10010 if hive is using a binary transfer mode or 10011 if hive is using an http transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10010 if Hive is using a binary transport mode or 10011 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1001[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_4_0_0_hive_server_set_transport_mode"/>
@@ -685,7 +685,7 @@
         <pre-downgrade>
           <task xsi:type="manual">
             <summary>HiveServer Port Availability</summary>
-            <message>Please note that the HiveServer port will now change to 10000 if hive is using a binary transfer mode or 10001 if hive is using an http transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
+            <message>Please note that the HiveServer port will now change to 10000 if Hive is using a binary transport mode or 10001 if Hive is using an HTTP transport mode. You can use "netstat -anp | grep 1000[01]" to determine if the port is available on each of the following HiveServer host(s): {{hosts.all}}. If the port is not available, the process using it must be terminated.</message>
           </task>
 
           <task xsi:type="configure" id="hdp_2_4_0_0_hive_server_restore_transport_mode_on_downgrade" />

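The manual tasks above ask the operator to confirm that the new HiveServer ports are free before the transport-mode switch is applied. As a rough illustration of the suggested netstat check, a minimal Python sketch (the host names are hypothetical stand-ins for {{hosts.all}}; the ports come from the messages above):

    # Sketch only: confirm nothing is listening on the HiveServer ports
    # named in the upgrade/downgrade messages. Hosts are hypothetical
    # stand-ins for {{hosts.all}}.
    import socket

    def port_is_free(host, port, timeout=2.0):
        """Return True if nothing accepts a TCP connection on host:port."""
        try:
            with socket.create_connection((host, port), timeout=timeout):
                return False  # something is already listening
        except OSError:
            return True       # refused, timed out, or unreachable

    for host in ["hiveserver1.example.com", "hiveserver2.example.com"]:
        for port in (10010, 10011):   # 10000/10001 for the downgrade case
            print(host, port, "free" if port_is_free(host, port) else "IN USE")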

[26/51] [abbrv] ambari git commit: AMBARI-15326. YARN Queues Templated Dashboard. (Prajwal Rao via yusaku)

Posted by jl...@apache.org.
AMBARI-15326. YARN Queues Templated Dashboard. (Prajwal Rao via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2c2c201e
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2c2c201e
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2c2c201e

Branch: refs/heads/AMBARI-13364
Commit: 2c2c201e97cc2dec9c0298c41d2b47597d6cac70
Parents: 2763c30
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Tue Mar 8 15:59:02 2016 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Tue Mar 8 15:59:02 2016 -0800

----------------------------------------------------------------------
 .../ambari-metrics/datasource.js                | 165 +++++++++++++------
 .../ambari-metrics/queryCtrl.js                 |   2 +-
 2 files changed, 116 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2c2c201e/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
index 855caaa..3e665e0 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
@@ -36,28 +36,28 @@ define([
           this.url = datasource.url;
           this.initMetricAppidMapping();
         }
-        var allMetrics = [];
+        var allMetrics = {};
         var appIds = [];
+
         //We get a list of components and their associated metrics.
         AmbariMetricsDatasource.prototype.initMetricAppidMapping = function () {
           backendSrv.get(this.url + '/ws/v1/timeline/metrics/metadata')
             .then(function (items) {
-              allMetrics = [];
+              allMetrics = {};
               appIds = [];
+              _.forEach(items, function (metric,app) {
+                metric.forEach(function (component) {
+                  if (!allMetrics[app]) {
+                    allMetrics[app] = [];
+                  }
+                  allMetrics[app].push(component.metricname);
+                });
+              });
               //We remove a couple of components from the list that do not contain any
               //pertinent metrics.
-              delete items.timeline_metric_store_watcher; delete items.amssmoketestfake;
-              for (var key in items) {
-                if (items.hasOwnProperty(key)) {
-                  items[key].forEach(function (_item) {
-                    allMetrics.push({
-                      metric: _item.metricname,
-                      app: key
-                    });
-                  });
-                }
-                appIds = _.keys(items);
-              }
+              delete allMetrics["timeline_metric_store_watcher"];
+              delete allMetrics["amssmoketestfake"];
+              appIds = Object.keys(allMetrics);
             });
         };
 
@@ -106,7 +106,7 @@ define([
               var timeSeries = {};
               if (target.hosts === undefined || target.hosts.trim() === "") {
                 timeSeries = {
-                  target: target.metric + hostLegend,
+                  target: res.metrics[0].metricname + hostLegend,
                   datapoints: []
                 };
               } else {
@@ -123,7 +123,6 @@ define([
               series.push(timeSeries);
               return $q.when({data: series});
             };
-
           };
           var getHostAppIdData = function(target) {
             var precision = target.shouldAddPrecision && target.precision !== '' ? '&precision=' + target.precision : '';
@@ -136,18 +135,12 @@ define([
             );
           };
           //Check if it's a templated dashboard.
-          var templatedHosts = templateSrv.variables.filter(function(o) {
-            return o.name === "hosts"
-          });
-          var templatedHost = (_.isEmpty(templateSrv.variables)) ? '' : templatedHosts[0].options.filter(function(host) {
-            return host.selected;
-          }).map(function(hostName) {
-            return hostName.value;
-          });
+          var templatedHosts = templateSrv.variables.filter(function(o) { return o.name === "hosts"});
+          var templatedHost = (_.isEmpty(templatedHosts)) ? '' : templatedHosts[0].options.filter(function(host)
+            { return host.selected; }).map(function(hostName) { return hostName.value; });
 
-          var tComponents = _.isEmpty(templateSrv.variables) ? '' : templateSrv.variables.filter(function(variable) {
-            return variable.name === "components"
-          });
+          var tComponents = _.isEmpty(templateSrv.variables) ? '' : templateSrv.variables.filter(function(variable)
+            { return variable.name === "components"});
           var tComponent = _.isEmpty(tComponents) ? '' : tComponents[0].current.value;
 
           var getServiceAppIdData = function(target) {
@@ -161,12 +154,50 @@ define([
             );
           };
 
+          var getYarnAppIdData = function(target) {
+            var precision = target.shouldAddPrecision && target.precision !== '' ? '&precision=' + target.precision : '';
+            var rate = target.shouldComputeRate ? '._rate._' : '._';
+            return backendSrv.get(self.url + '/ws/v1/timeline/metrics?metricNames=' + target.queue + rate
+              + target.aggregator + '&appId=resourcemanager&startTime=' + from +
+              '&endTime=' + to + precision).then(
+              getMetricsData(target)
+            );
+          };
+
           // Time Ranges
           var from = Math.floor(options.range.from.valueOf() / 1000);
           var to = Math.floor(options.range.to.valueOf() / 1000);
 
           var metricsPromises = [];
           if (!_.isEmpty(templateSrv.variables)) {
+            // YARN Queues Dashboard
+            if (templateSrv.variables[0].query === "yarnqueues") {
+              var allQueues = templateSrv.variables.filter(function(variable) { return variable.query === "yarnqueues";});
+              var selectedQs = (_.isEmpty(allQueues)) ? "" : allQueues[0].options.filter(function(q)
+              { return q.selected; }).map(function(qName) { return qName.value; });
+              // All Queues
+              if (!_.isEmpty(_.find(selectedQs, function (wildcard) { return wildcard === "*"; })))  {
+                var allQueue = allQueues[0].options.filter(function(q) {
+                  return q.text !== "All"; }).map(function(queue) { return queue.value; });
+                _.forEach(allQueue, function(processQueue) {
+                  metricsPromises.push(_.map(options.targets, function(target) {
+                    target.qmetric = processQueue;
+                    target.queue = target.metric.replace('root', target.qmetric);
+                    return getYarnAppIdData(target);
+                  }));
+                });
+              } else {
+                // All selected queues.
+                _.forEach(selectedQs, function(processQueue) {
+                  metricsPromises.push(_.map(options.targets, function(target) {
+                    target.qmetric = processQueue;
+                    target.queue = target.metric.replace('root', target.qmetric);
+                    return getYarnAppIdData(target);
+                  }));
+                });
+              }
+            }
+            //All Hosts
             if (!_.isEmpty(_.find(templatedHost, function (wildcard) { return wildcard === "*"; })))  {
               var allHosts = templateSrv.variables.filter(function(variable) { return variable.name === "hosts"});
               var allHost = allHosts[0].options.filter(function(all) {
@@ -180,6 +211,7 @@ define([
               }));
             });
             } else {
+              // Single or multi selected hosts
               _.forEach(templatedHost, function(processHost) {
               metricsPromises.push(_.map(options.targets, function(target) {
                 target.templatedHost = processHost;
@@ -192,6 +224,7 @@ define([
 
             metricsPromises = _.flatten(metricsPromises);
           } else {
+            // Non Templatized Dashboards
             metricsPromises = _.map(options.targets, function(target) {
               console.debug('target app=' + target.app + ',' +
                 'target metric=' + target.metric + ' on host=' + target.tempHost);
@@ -226,40 +259,76 @@ define([
         /**
          * AMS Datasource Templating Variables.
          */
-        AmbariMetricsDatasource.prototype.metricFindQuery = function(query) {
+        AmbariMetricsDatasource.prototype.metricFindQuery = function (query) {
           var interpolated;
           try {
             interpolated = templateSrv.replace(query);
           } catch (err) {
             return $q.reject(err);
           }
-          var tComponents = _.isEmpty(templateSrv.variables) ? '' : templateSrv.variables.filter(function(variable) {
-            return variable.name === "components"
-          });
+          var tComponents = _.isEmpty(templateSrv.variables) ? '' : templateSrv.variables.filter(function(variable) 
+            { return variable.name === "components"});
           var tComponent = _.isEmpty(tComponents) ? '' : tComponents[0].current.value;
-          if (!tComponent) {
-            return this.doAmbariRequest({
-                method: 'GET',
-                url: '/ws/v1/timeline/metrics/' + interpolated
-              }).then(function(results) {
-                //Remove fakehostname from the list of hosts on the cluster.
-                var fake = "fakehostname";
-                delete results.data[fake];
-                return _.map(_.keys(results.data), function(hostName) {
+          // Templated Variable for YARN Queues.
+          // It will search the cluster and populate the queues.
+          if(interpolated === "yarnqueues") {
+            return backendSrv.get(this.url + '/ws/v1/timeline/metrics/metadata')
+              .then(function (results) {
+                var allM = {};
+                _.forEach(results, function (metric,app) {
+                  metric.forEach(function (component) {
+                    if (!allM[app]) {
+                      allM[app] = [];
+                    }
+                    allM[app].push(component.metricname);
+                  });
+                });
+                var yarnqueues = allM["resourcemanager"];
+                var extractQueues = yarnqueues.filter(/./.test.bind(new RegExp(".=root", 'g')));
+                var queues = _.map(extractQueues, function(metric) {
+                  return metric.substring("yarn.QueueMetrics.Queue=".length);
+                });
+                queues = _.map(queues, function(metricName) {
+                  return metricName.substring(metricName.lastIndexOf("."), 0);
+                });
+                queues = _.sortBy(_.uniq(queues));
+                return _.map(queues, function (queues) {
                   return {
-                    text: hostName,
-                    expandable: hostName.expandable ? true : false
+                    text: queues
                   };
                 });
               });
+          }
+          // Templated Variable that will populate all hosts on the cluster.
+          // The variable needs to be set to "hosts".
+          if (!tComponent){
+                  return this.doAmbariRequest({
+                        method: 'GET',
+                        url: '/ws/v1/timeline/metrics/' + interpolated
+                      })
+                      .then(function (results) {
+                        //Remove fakehostname from the list of hosts on the cluster.
+                        var fake = "fakehostname"; delete results.data[fake];
+                        return _.map(_.keys(results.data), function (hostName) {
+                          return {
+                                text: hostName,
+                                expandable: hostName.expandable ? true : false
+                              };
+                        });
+                      });
           } else {
+            // Create a dropdown in templated dashboards for single components.
+            // This will check for the component set and show hosts only for the
+            // selected component.
             return this.doAmbariRequest({
                 method: 'GET',
                 url: '/ws/v1/timeline/metrics/hosts'
-              }).then(function(results) {
+              })
+              .then(function(results) {
                 var compToHostMap = {};
                 //Remove fakehostname from the list of hosts on the cluster.
-                var fake = "fakehostname"; delete results.data[fake];
+                var fake = "fakehostname";
+                delete results.data[fake];
                 //Query hosts based on component name
                 _.forEach(results.data, function(comp, hostName) {
                   comp.forEach(function(component) {
@@ -281,7 +350,7 @@ define([
                 });
                 return $q.when(compHosts);
               });
-          }
+           }
         };
 
         /**
@@ -326,12 +395,8 @@ define([
           if (!app) {
             return $q.when([]);
           }
-          var metrics = allMetrics.filter(function(item) {
-            return (item.app === app);
-          });
           var keys = [];
-          _.forEach(metrics, function (k) { keys.push(k.metric); });
-          keys = _.map(keys,function(m) {
+          keys = _.map(allMetrics[app],function(m) {
             return {text: m};
           });
           keys = _.sortBy(keys, function (i) { return i.text.toLowerCase(); });

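Two pieces of the datasource change above are worth unpacking: the metadata response is reshaped from a flat list of {metric, app} pairs into a map keyed by appId, and the yarnqueues template variable derives queue names from resourcemanager metric names by stripping a fixed prefix and the trailing metric segment. A rough Python rendering of both steps, with an illustrative payload (the real shape comes from /ws/v1/timeline/metrics/metadata):

    # Sketch of the two transformations in datasource.js above; the
    # metadata payload and metric names are illustrative.
    metadata = {
        "resourcemanager": [
            {"metricname": "yarn.QueueMetrics.Queue=root.AppsRunning"},
            {"metricname": "yarn.QueueMetrics.Queue=root.default.AppsRunning"},
        ],
        "timeline_metric_store_watcher": [{"metricname": "FakeMetric"}],
    }

    # 1) appId -> [metric names], dropping components with no useful metrics
    all_metrics = {app: [m["metricname"] for m in comps]
                   for app, comps in metadata.items()}
    for noise in ("timeline_metric_store_watcher", "amssmoketestfake"):
        all_metrics.pop(noise, None)

    # 2) queue names: keep the Queue=root metrics, strip the prefix and the
    # trailing ".<metric>" segment, then de-duplicate and sort
    prefix = "yarn.QueueMetrics.Queue="
    queues = sorted({name[len(prefix):].rsplit(".", 1)[0]
                     for name in all_metrics.get("resourcemanager", [])
                     if "Queue=root" in name})
    print(queues)   # ['root', 'root.default']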
http://git-wip-us.apache.org/repos/asf/ambari/blob/2c2c201e/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js
index 6c231dd..3f5e31e 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/queryCtrl.js
@@ -42,7 +42,7 @@ define([
           
           $scope.$watch('target.precision', function() {
             if ($scope.target.precision !== "seconds") {
-              $scope.target.aggregator = 'avg';
+              $scope.target.aggregator = $scope.target.aggregator;
             }
           });
           $scope.$watch('target.app', function (newValue) {


[07/51] [abbrv] ambari git commit: Update download URLs to use archive for older releases. (yusaku)

Posted by jl...@apache.org.
Update download URLs to use archive for older releases. (yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bdba8cbd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bdba8cbd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bdba8cbd

Branch: refs/heads/AMBARI-13364
Commit: bdba8cbd56d4ed3c9f3ad239c1e0bd82d1fd58ec
Parents: 283d816
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Mon Mar 7 16:34:06 2016 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Mon Mar 7 16:34:06 2016 -0800

----------------------------------------------------------------------
 docs/src/site/site.xml | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bdba8cbd/docs/src/site/site.xml
----------------------------------------------------------------------
diff --git a/docs/src/site/site.xml b/docs/src/site/site.xml
index bc47fa5..467299c 100644
--- a/docs/src/site/site.xml
+++ b/docs/src/site/site.xml
@@ -135,18 +135,18 @@
 
     <menu name="Releases">
       <item name="2.2.1" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-2.2.1"/>
-      <item name="2.2.0" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-2.2.0"/>
-      <item name="2.1.2" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-2.1.2"/>
-      <item name="2.1.1" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-2.1.1"/>
-      <item name="2.1.0" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-2.1.0"/>
-      <item name="2.0.2" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-2.0.2"/>
-      <item name="2.0.1" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-2.0.1"/>
-      <item name="2.0.0" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-2.0.0"/>
-      <item name="1.7.0" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-1.7.0"/>
-      <item name="1.6.1" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-1.6.1"/>
-      <item name="1.6.0" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-1.6.0"/>
-      <item name="1.5.1" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-1.5.1"/>
-      <item name="1.5.0" href="http://www.apache.org/dyn/closer.cgi/ambari/ambari-1.5.0"/>
+      <item name="2.2.0" href="http://archive.apache.org/dist/ambari/ambari-2.2.0"/>
+      <item name="2.1.2" href="http://archive.apache.org/dist/ambari/ambari-2.1.2"/>
+      <item name="2.1.1" href="http://archive.apache.org/dist/ambari/ambari-2.1.1"/>
+      <item name="2.1.0" href="http://archive.apache.org/dist/ambari/ambari-2.1.0"/>
+      <item name="2.0.2" href="http://archive.apache.org/dist/ambari/ambari-2.0.2"/>
+      <item name="2.0.1" href="http://archive.apache.org/dist/ambari/ambari-2.0.1"/>
+      <item name="2.0.0" href="http://archive.apache.org/dist/ambari/ambari-2.0.0"/>
+      <item name="1.7.0" href="http://archive.apache.org/dist/ambari/ambari-1.7.0"/>
+      <item name="1.6.1" href="http://archive.apache.org/dist/ambari/ambari-1.6.1"/>
+      <item name="1.6.0" href="http://archive.apache.org/dist/ambari/ambari-1.6.0"/>
+      <item name="1.5.1" href="http://archive.apache.org/dist/ambari/ambari-1.5.1"/>
+      <item name="1.5.0" href="http://archive.apache.org/dist/ambari/ambari-1.5.0"/>
       <item name="1.4.4" href="http://archive.apache.org/dist/ambari/ambari-1.4.4/"/>
       <item name="1.4.3" href="http://archive.apache.org/dist/ambari/ambari-1.4.3/"/>
       <item name="1.4.2" href="http://archive.apache.org/dist/ambari/ambari-1.4.2/"/>


[47/51] [abbrv] ambari git commit: AMBARI-15053: Parameterize distro-specific stack information for YARN/MR (Juanjo Marron via dili)

Posted by jl...@apache.org.
AMBARI-15053: Parameterize distro-specific stack information for YARN/MR (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/897e6ab0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/897e6ab0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/897e6ab0

Branch: refs/heads/AMBARI-13364
Commit: 897e6ab0c113cfdea4cf41d1a89d7ace61f72c92
Parents: 4788dc2
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:28:27 2016 -0500
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 15:23:53 2016 -0800

----------------------------------------------------------------------
 .../scripts/application_timeline_server.py      |  5 ++--
 .../2.1.0.2.0/package/scripts/historyserver.py  |  9 +++----
 .../package/scripts/mapreduce2_client.py        |  6 +++--
 .../2.1.0.2.0/package/scripts/nodemanager.py    |  5 ++--
 .../2.1.0.2.0/package/scripts/params_linux.py   | 25 +++++++++++---------
 .../package/scripts/resourcemanager.py          |  5 ++--
 .../2.1.0.2.0/package/scripts/service_check.py  |  6 ++---
 .../YARN/2.1.0.2.0/package/scripts/yarn.py      |  6 ++---
 .../2.1.0.2.0/package/scripts/yarn_client.py    |  5 ++--
 9 files changed, 41 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
index 2966581..9d55fbf 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
@@ -63,14 +63,15 @@ class ApplicationTimelineServerWindows(ApplicationTimelineServer):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ApplicationTimelineServerDefault(ApplicationTimelineServer):
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-yarn-timelineserver"}
+    import params
+    return {params.stack_name: "hadoop-yarn-timelineserver"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-yarn-timelineserver", params.version)
 
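The pre_upgrade_restart pattern repeated through this commit replaces the hard-coded '2.2.0.0' gate with the cluster-env property stack_version_ru_support. A condensed sketch of that gate, with stand-ins for the resource_management helpers (all values below are illustrative):

    # Sketch of the version gate used across these scripts; the helpers
    # only mirror compare_versions/format_stack_version for illustration.
    def format_stack_version(v):
        return v.split("-")[0]          # e.g. "2.3.0.0-1234" -> "2.3.0.0"

    def compare_versions(a, b):
        pa = [int(x) for x in a.split(".")]
        pb = [int(x) for x in b.split(".")]
        return (pa > pb) - (pa < pb)    # -1, 0 or 1

    class params:                        # stand-in for params_linux.py
        version = "2.3.0.0"                      # illustrative
        stack_version_ru_support = "2.2.0.0"     # read from cluster-env

    if params.version and compare_versions(
            format_stack_version(params.version),
            params.stack_version_ru_support) >= 0:
        print("conf-select/stack-select the versioned hadoop packages")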

http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
index 53b0e53..03681ee 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
@@ -70,14 +70,15 @@ class HistoryserverWindows(HistoryServer):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class HistoryServerDefault(HistoryServer):
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-mapreduce-historyserver"}
+    import params
+    return {params.stack_name: "hadoop-mapreduce-historyserver"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-mapreduce-historyserver", params.version)
       # MC Hammer said, "Can't touch this"
@@ -91,7 +92,7 @@ class HistoryServerDefault(HistoryServer):
     env.set_params(params)
     self.configure(env) # FOR SECURITY
 
-    if params.stack_version_formatted_major and compare_versions(params.stack_version_formatted_major, '2.2.0.0') >= 0:
+    if params.stack_version_formatted and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0:
       # MC Hammer said, "Can't touch this"
       resource_created = copy_to_hdfs(
         "mapreduce",
@@ -111,7 +112,7 @@ class HistoryServerDefault(HistoryServer):
       if resource_created:
         params.HdfsResource(None, action="execute")
     else:
-      # In HDP 2.1, tez.tar.gz was copied to a different folder in HDFS.
+      # In versions < stack_version_ru_support, tez.tar.gz was copied to a different folder in HDFS.
       install_tez_jars()
 
     service('historyserver', action='start', serviceName='mapreduce')

http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
index 9fc1e32..3c52681 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
@@ -50,13 +50,15 @@ class MapReduce2ClientWindows(MapReduce2Client):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class MapReduce2ClientDefault(MapReduce2Client):
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-client"}
+    import params
+    return {params.stack_name: "hadoop-client"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
index fd14d0f..8ec15ed 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
@@ -65,14 +65,15 @@ class NodemanagerWindows(Nodemanager):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class NodemanagerDefault(Nodemanager):
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-yarn-nodemanager"}
+    import params
+    return {params.stack_name: "hadoop-yarn-nodemanager"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing NodeManager Stack Upgrade pre-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-yarn-nodemanager", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index e02a55d..1f82870 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -33,7 +33,7 @@ from resource_management.libraries import functions
 import status_params
 
 # a map of the Ambari role to the component name
-# for use with /usr/hdp/current/<component>
+# for use with <stack_dir>/current/<component>
 MAPR_SERVER_ROLE_DIRECTORY_MAP = {
   'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
   'MAPREDUCE2_CLIENT' : 'hadoop-mapreduce-client',
@@ -51,11 +51,14 @@ config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
 stack_name = default("/hostLevelParams/stack_name", None)
+stack_dir = config['configurations']['cluster-env']['stack_dir']
 
 # This is expected to be of the form #.#.#.#
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-stack_version_formatted_major = format_stack_version(stack_version_unformatted)
-stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
+
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version = functions.get_stack_version('hadoop-yarn-resourcemanager')
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
 # It cannot be used during the initial Cluser Install because the version is not yet known.
@@ -75,8 +78,8 @@ yarn_bin = "/usr/lib/hadoop-yarn/sbin"
 yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
 hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
 
-# hadoop parameters for 2.2+
-if Script.is_stack_greater_or_equal("2.2"):
+# hadoop parameters for stack_version_ru_support+
+if Script.is_stack_greater_or_equal(stack_version_ru_support):
   # MapR directory root
   mapred_role_root = "hadoop-mapreduce-client"
   command_role = default("/role", "")
@@ -88,14 +91,14 @@ if Script.is_stack_greater_or_equal("2.2"):
   if command_role in YARN_SERVER_ROLE_DIRECTORY_MAP:
     yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP[command_role]
 
-  hadoop_mapred2_jar_location = format("/usr/hdp/current/{mapred_role_root}")
-  mapred_bin = format("/usr/hdp/current/{mapred_role_root}/sbin")
+  hadoop_mapred2_jar_location = format("{stack_dir}/current/{mapred_role_root}")
+  mapred_bin = format("{stack_dir}/current/{mapred_role_root}/sbin")
 
-  hadoop_yarn_home = format("/usr/hdp/current/{yarn_role_root}")
-  yarn_bin = format("/usr/hdp/current/{yarn_role_root}/sbin")
-  yarn_container_bin = format("/usr/hdp/current/{yarn_role_root}/bin")
+  hadoop_yarn_home = format("{stack_dir}/current/{yarn_role_root}")
+  yarn_bin = format("{stack_dir}/current/{yarn_role_root}/sbin")
+  yarn_container_bin = format("{stack_dir}/current/{yarn_role_root}/bin")
 
-  # Timeline Service property that was added in 2.2
+  # Timeline Service property that was added in stack_version_ru_support
   ats_leveldb_state_store_dir = config['configurations']['yarn-site']['yarn.timeline-service.leveldb-state-store.path']
 
 # ats 1.5 properties

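params_linux.py now derives component paths from the configurable stack root rather than the literal /usr/hdp. A small sketch of that substitution (the stack_dir value and the role below are illustrative):

    # Sketch: component paths built from a configurable stack root, as in
    # params_linux.py above; values are illustrative.
    stack_dir = "/usr/hdp"              # cluster-env 'stack_dir'
    YARN_SERVER_ROLE_DIRECTORY_MAP = {
        "APP_TIMELINE_SERVER": "hadoop-yarn-timelineserver",
        "NODEMANAGER": "hadoop-yarn-nodemanager",
        "RESOURCEMANAGER": "hadoop-yarn-resourcemanager",
    }

    command_role = "NODEMANAGER"
    yarn_role_root = YARN_SERVER_ROLE_DIRECTORY_MAP.get(
        command_role, "hadoop-yarn-client")

    hadoop_yarn_home = "{0}/current/{1}".format(stack_dir, yarn_role_root)
    yarn_bin = "{0}/current/{1}/sbin".format(stack_dir, yarn_role_root)
    print(hadoop_yarn_home)   # /usr/hdp/current/hadoop-yarn-nodemanager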
http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index e51ca8a..9a2aa5c 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -98,14 +98,15 @@ class ResourcemanagerWindows(Resourcemanager):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ResourcemanagerDefault(Resourcemanager):
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-yarn-resourcemanager"}
+    import params
+    return {params.stack_name: "hadoop-yarn-resourcemanager"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade post-restart")
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-yarn-resourcemanager", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
index d82b630..c25c8f4 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
@@ -85,10 +85,10 @@ class ServiceCheckDefault(ServiceCheck):
     import params
     env.set_params(params)
 
-    if params.stack_version_formatted_major != "" and compare_versions(params.stack_version_formatted_major, '2.2') >= 0:
-      path_to_distributed_shell_jar = "/usr/hdp/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar"
+    if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, params.stack_version_ru_support) >= 0:
+      path_to_distributed_shell_jar = format("{stack_dir}/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar")
     else:
-      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"
+      path_to_distributed_shell_jar = format("{stack_dir}/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar")
 
     yarn_distrubuted_shell_check_cmd = format("yarn org.apache.hadoop.yarn.applications.distributedshell.Client "
                                               "-shell_command ls -num_containers {number_of_nm} -jar {path_to_distributed_shell_jar}")

http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
index e05ed60..f05ed78 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
@@ -189,7 +189,7 @@ def yarn(name = None):
 
   # During RU, Core Masters and Slaves need hdfs-site.xml
   # TODO, instead of specifying individual configs, which is susceptible to breaking when new configs are added,
-  # RU should rely on all available in /usr/hdp/<version>/hadoop/conf
+  # RU should rely on all available in {stack_dir}/<version>/hadoop/conf
   if 'hdfs-site' in params.config['configurations']:
     XmlConfig("hdfs-site.xml",
               conf_dir=params.hadoop_conf_dir,
@@ -252,8 +252,8 @@ def yarn(name = None):
        cd_access="a",
     )
 
-    # if HDP stack is greater than/equal to 2.2, mkdir for state store property (added in 2.2)
-    if (Script.is_stack_greater_or_equal("2.2")):
+    # if stack is greater than/equal to stack_version_ru_support, mkdir for state store property (added in stack_version_ru_support)
+    if (Script.is_stack_greater_or_equal(params.stack_version_ru_support)):
       Directory(params.ats_leveldb_state_store_dir,
        owner=params.yarn_user,
        group=params.user_group,

http://git-wip-us.apache.org/repos/asf/ambari/blob/897e6ab0/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
index d300279..cb967eb 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
@@ -50,13 +50,14 @@ class YarnClientWindows(YarnClient):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class YarnClientDefault(YarnClient):
   def get_stack_to_component(self):
-    return {"HDP": "hadoop-client"}
+    import params
+    return {params.stack_name: "hadoop-client"}
 
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
       stack_select.select("hadoop-client", params.version)
 


[40/51] [abbrv] ambari git commit: AMBARI-15327 Combo Search: Query for multiple <Component:Any> facets and display label instead of value in search box (Joe Wang via rzang)

Posted by jl...@apache.org.
AMBARI-15327 Combo Search: Query for multiple <Component:Any> facets and display label instead of value in search box (Joe Wang via rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c0d07416
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c0d07416
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c0d07416

Branch: refs/heads/AMBARI-13364
Commit: c0d07416e2f75912cacbd3f4a3adb0b918729892
Parents: eb6b0da
Author: Richard Zang <rz...@apache.org>
Authored: Wed Mar 9 11:42:24 2016 -0800
Committer: Richard Zang <rz...@apache.org>
Committed: Wed Mar 9 11:42:24 2016 -0800

----------------------------------------------------------------------
 ambari-web/app/controllers/main/host.js         | 26 +++++--
 .../controllers/main/host/combo_search_box.js   |  2 +-
 .../mixins/common/table_server_view_mixin.js    | 13 ++--
 .../app/views/main/host/combo_search_box.js     | 79 +++++++++++++-------
 ambari-web/test/controllers/main/host_test.js   |  6 +-
 5 files changed, 82 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c0d07416/ambari-web/app/controllers/main/host.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host.js b/ambari-web/app/controllers/main/host.js
index b473c59..5d7e268 100644
--- a/ambari-web/app/controllers/main/host.js
+++ b/ambari-web/app/controllers/main/host.js
@@ -494,23 +494,29 @@ App.MainHostController = Em.ArrayController.extend(App.TableServerMixin, {
     return result;
   },
 
+  labelValueMap: {},
+
   /**
    * Filter hosts by componentName of <code>component</code>
    * @param {App.HostComponent} component
    */
   filterByComponent: function (component) {
     if (!component) return;
-    var id = component.get('componentName');
-    var column = 6;
+    var componentName = component.get('componentName');
+    var displayName = App.format.role(componentName);
     var colPropAssoc = this.get('colPropAssoc');
+    var map = this.get('labelValueMap');
 
     var filterForComponent = {
-      iColumn: column,
-      value: [id],
-      type: 'multiple'
+      iColumn: 15,
+      value: componentName + ':ALL',
+      type: 'string'
     };
+    map[displayName] = componentName;
+    map['All'] = 'ALL';
+    var filterStr = '"' + displayName + '"' + ': "All"';
     App.db.setFilterConditions(this.get('name'), [filterForComponent]);
-    App.db.setComboSearchQuery(this.get('name'), colPropAssoc[column] + ': ' + '"' + id + '"');
+    App.db.setComboSearchQuery(this.get('name'), filterStr);
   },
 
   /**
@@ -521,6 +527,7 @@ App.MainHostController = Em.ArrayController.extend(App.TableServerMixin, {
   filterByStack: function (displayName, state) {
     if (!displayName || !state) return;
     var colPropAssoc = this.get('colPropAssoc');
+    var map = this.get('labelValueMap');
 
     var versionFilter = {
       iColumn: 16,
@@ -532,8 +539,11 @@ App.MainHostController = Em.ArrayController.extend(App.TableServerMixin, {
       value: state.toUpperCase(),
       type: 'string'
     };
-    var versionFilterStr = colPropAssoc[versionFilter.iColumn] + ': ' + '"' + versionFilter.value + '"';
-    var stateFilterStr = colPropAssoc[stateFilter.iColumn] + ': ' + '"' + stateFilter.value + '"';
+    map["Stack Version"] = colPropAssoc[versionFilter.iColumn];
+    map["Version State"] = colPropAssoc[stateFilter.iColumn];
+    map[App.HostStackVersion.formatStatus(stateFilter.value)] = stateFilter.value;
+    var versionFilterStr = '"Stack Version": "' + versionFilter.value + '"';
+    var stateFilterStr = '"Version State": "' + App.HostStackVersion.formatStatus(stateFilter.value) + '"';
     App.db.setFilterConditions(this.get('name'), [versionFilter, stateFilter]);
     App.db.setComboSearchQuery(this.get('name'), [versionFilterStr, stateFilterStr].join(' '));
   },

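filterByComponent and filterByStack now put human-readable labels into the search box and record them in labelValueMap so they can be translated back to internal property names when the filter runs. A tiny Python model of that round trip (the names used are illustrative):

    # Sketch of the label<->value round trip introduced above.
    label_value_map = {}

    def filter_by_component(component_name, display_name):
        # the search box shows the display label; the map lets the query
        # builder recover the internal value later
        label_value_map[display_name] = component_name
        label_value_map["All"] = "ALL"
        return '"{0}": "All"'.format(display_name)

    print(filter_by_component("NODEMANAGER", "NodeManager"))  # "NodeManager": "All"

    category, value = "NodeManager", "All"
    print(label_value_map.get(category, category))   # NODEMANAGER
    print(label_value_map.get(value, value))         # ALL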
http://git-wip-us.apache.org/repos/asf/ambari/blob/c0d07416/ambari-web/app/controllers/main/host/combo_search_box.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/combo_search_box.js b/ambari-web/app/controllers/main/host/combo_search_box.js
index 9fa6851..6f64170 100644
--- a/ambari-web/app/controllers/main/host/combo_search_box.js
+++ b/ambari-web/app/controllers/main/host/combo_search_box.js
@@ -92,7 +92,7 @@ App.MainHostComboSearchBoxController = Em.Controller.extend({
         ex = ex.replace('{1}', v);
         result += ex;
       }
-      result += '&';
+      result += '|';
     }
 
     return result.substring(0, result.length - 1);

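The one-character change above swaps the '&' joiner for '|', so the expressions generated for multiple facets are OR-ed rather than AND-ed in the resulting query string. The effect, sketched in Python (the expressions are illustrative placeholders, not actual Ambari API predicates):

    # Sketch of the joiner change: the loop above appends each expression
    # plus '|' and trims the final character, which is equivalent to:
    expressions = [
        "component_name=NODEMANAGER",     # placeholder expression
        "component_name=DATANODE",        # placeholder expression
    ]
    query = "|".join(expressions)         # previously "&".join(expressions)
    print(query)   # component_name=NODEMANAGER|component_name=DATANODE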
http://git-wip-us.apache.org/repos/asf/ambari/blob/c0d07416/ambari-web/app/mixins/common/table_server_view_mixin.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/mixins/common/table_server_view_mixin.js b/ambari-web/app/mixins/common/table_server_view_mixin.js
index eca17d6..66f886a 100644
--- a/ambari-web/app/mixins/common/table_server_view_mixin.js
+++ b/ambari-web/app/mixins/common/table_server_view_mixin.js
@@ -95,10 +95,13 @@ App.TableServerViewMixin = Em.Mixin.create({
     this.set('filterConditions', []);
     searchCollection.models.forEach(function (model) {
       var tag = model.attributes;
-      var isComponentState = comboController.isComponentStateFacet(tag.category);
-      var iColumn = App.router.get('mainHostController').get('colPropAssoc').indexOf(isComponentState? 'componentState' : tag.category);
+      var map = App.router.get('mainHostController.labelValueMap');
+      var category = map[tag.category] || tag.category;
+      var value = map[tag.value] || tag.value;
+      var isComponentState = comboController.isComponentStateFacet(category);
+      var iColumn = App.router.get('mainHostController').get('colPropAssoc').indexOf(isComponentState? 'componentState' : category);
       var filterCondition = self.get('filterConditions').findProperty('iColumn', iColumn);
-      var filterValue = isComponentState? (tag.category + ':' + tag.value) : tag.value;
+      var filterValue = isComponentState? (category + ':' + value) : value;
       if (filterCondition) {
         if (typeof filterCondition.value == 'string') {
           filterCondition.value = [filterCondition.value, filterValue];
@@ -107,10 +110,10 @@ App.TableServerViewMixin = Em.Mixin.create({
         }
       } else {
         var type = 'string';
-        if (tag.category === 'cpu') {
+        if (category === 'cpu') {
           type = 'number';
         }
-        if (tag.category === 'memoryFormatted') {
+        if (category === 'memoryFormatted') {
           type = 'ambari-bandwidth';
         }
         filterCondition = {

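The mixin change above resolves display labels through labelValueMap and then folds repeated facets for the same column into a single multi-valued filter condition, promoting a string value to an array on the second occurrence. A Python sketch of that accumulation (the facet data is illustrative):

    # Sketch of the string-to-list promotion used when several facets
    # target the same column; keyed by category here instead of iColumn.
    facets = [("componentState", "NODEMANAGER:STARTED"),
              ("componentState", "DATANODE:STARTED"),
              ("hostName", "host1")]

    conditions = {}
    for category, value in facets:
        cond = conditions.get(category)
        if cond is None:
            conditions[category] = {"value": value, "type": "string"}
        elif isinstance(cond["value"], str):
            cond["value"] = [cond["value"], value]   # promote to a list
        else:
            cond["value"].append(value)

    print(conditions["componentState"]["value"])
    # ['NODEMANAGER:STARTED', 'DATANODE:STARTED']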
http://git-wip-us.apache.org/repos/asf/ambari/blob/c0d07416/ambari-web/app/views/main/host/combo_search_box.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/combo_search_box.js b/ambari-web/app/views/main/host/combo_search_box.js
index 17c0a6d..3ccacf2 100644
--- a/ambari-web/app/views/main/host/combo_search_box.js
+++ b/ambari-web/app/views/main/host/combo_search_box.js
@@ -39,7 +39,8 @@ App.MainHostComboSearchBoxView = Em.View.extend({
       var displayName = component.get('displayName');
       var name = component.get('componentName');
       if (displayName != null && !controller.isClientComponent(name)) {
-        hostComponentList.push({label: displayName, value: name, category: 'Component'});
+        hostComponentList.push({label: displayName, category: 'Component'});
+        App.router.get('mainHostController.labelValueMap')[displayName] = name;
       }
     });
     return hostComponentList;
@@ -50,9 +51,9 @@ App.MainHostComboSearchBoxView = Em.View.extend({
       hostComponentList = this.getHostComponentList();
     }
     var currentComponentFacets = visualSearch.searchQuery.toJSON().filter(function (facet) {
-      var result = !!(hostComponentList.findProperty('value', facet.category) && facet.value);
+      var result = !!(hostComponentList.findProperty('label', facet.category) && facet.value);
       if (!includeAllValue) {
-        result &= (facet.value != 'ALL');
+        result &= (facet.value != 'All');
       }
       return result;
     });
@@ -79,16 +80,27 @@ App.MainHostComboSearchBoxView = Em.View.extend({
 
         facetMatches: function (callback) {
           var list = [
-            {label: 'Host Name', value: 'hostName', category: 'Host'},
-            {label: 'IP', value: 'ip', category: 'Host'},
-            {label: 'Heath Status', value: 'healthClass', category: 'Host'},
-            {label: 'Cores', value: 'cpu', category: 'Host'},
-            {label: 'RAM', value: 'memoryFormatted', category: 'Host'},
-            {label: 'Stack Version', value: 'version', category: 'Host'},
-            {label: 'Version State', value: 'versionState', category: 'Host'},
-            {label: 'Rack', value: 'rack', category: 'Host'},
-            {label: 'Service', value: 'services', category: 'Service'},
+            {label: 'Host Name', category: 'Host'},
+            {label: 'IP', category: 'Host'},
+            {label: 'Heath Status', category: 'Host'},
+            {label: 'Cores', category: 'Host'},
+            {label: 'RAM', category: 'Host'},
+            {label: 'Stack Version', category: 'Host'},
+            {label: 'Version State', category: 'Host'},
+            {label: 'Rack', category: 'Host'},
+            {label: 'Service', category: 'Service'}
           ];
+          var map = App.router.get('mainHostController.labelValueMap');
+          map['Host Name'] = 'hostName';
+          map['IP'] = 'ip';
+          map['Heath Status'] = 'healthClass';
+          map['Cores'] = 'cpu';
+          map['RAM'] = 'memoryFormatted';
+          map['Stack Version'] = 'version';
+          map['Version State'] = 'versionState';
+          map['Rack'] = 'rack';
+          map['Service'] = 'services';
+
           var hostComponentList = self.getHostComponentList();
           // Add host component facets only when there isn't any component filter
           // with value other than ALL yet
@@ -101,13 +113,15 @@ App.MainHostComboSearchBoxView = Em.View.extend({
         },
 
         valueMatches: function (facet, searchTerm, callback) {
-          if (controller.isComponentStateFacet(facet)) {
-            facet = 'componentState'
+          var map = App.router.get('mainHostController.labelValueMap');
+          var facetValue = map[facet] || facet;
+          if (controller.isComponentStateFacet(facetValue)) {
+            facetValue = 'componentState'
           }
-          switch (facet) {
+          switch (facetValue) {
             case 'hostName':
             case 'ip':
-              controller.getPropertySuggestions(facet, searchTerm).done(function() {
+              controller.getPropertySuggestions(facetValue, searchTerm).done(function() {
                 callback(controller.get('currentSuggestion').reject(function (item) {
                   return visualSearch.searchQuery.values(facet).indexOf(item) >= 0; // reject the ones already in search
                 }), {preserveMatches: true});
@@ -126,7 +140,8 @@ App.MainHostComboSearchBoxView = Em.View.extend({
               break;
             case 'versionState':
               callback(App.HostStackVersion.statusDefinition.map(function (status) {
-                return {label: App.HostStackVersion.formatStatus(status), value: status};
+                map[App.HostStackVersion.formatStatus(status)] = status;
+                return App.HostStackVersion.formatStatus(status);
               }).reject(function (item) {
                 return visualSearch.searchQuery.values(facet).indexOf(item.value) >= 0;
               }));
@@ -134,32 +149,42 @@ App.MainHostComboSearchBoxView = Em.View.extend({
             case 'healthClass':
               var category_mocks = require('data/host/categories');
               callback(category_mocks.slice(1).map(function (category) {
-                return {label: category.value, value: category.healthStatus}
+                map[category.value] = category.healthStatus;
+                return category.value;
               }).reject(function (item) {
                 return visualSearch.searchQuery.values(facet).indexOf(item.value) >= 0;
               }), {preserveOrder: true});
               break;
             case 'services':
               callback(App.Service.find().toArray().map(function (service) {
-                return {label: App.format.role(service.get('serviceName')), value: service.get('serviceName')};
+                map[App.format.role(service.get('serviceName'))] = service.get('serviceName');
+                return App.format.role(service.get('serviceName'));
               }).reject(function (item) {
                 return visualSearch.searchQuery.values(facet).indexOf(item.value) >= 0;
               }), {preserveOrder: true});
               break;
             case 'componentState':
-              var list = [{label: "All", value: "ALL"}];
+              var list = [ "All" ];
+              map['All'] = 'ALL';
               var currentComponentFacets = self.getComponentStateFacets(null, true);
               if (currentComponentFacets.length == 0) {
                 list = list.concat(App.HostComponentStatus.getStatusesList().map(function (status) {
-                  return {label: App.HostComponentStatus.getTextStatus(status), value: status};
+                  map[App.HostComponentStatus.getTextStatus(status)] = status;
+                  return App.HostComponentStatus.getTextStatus(status);
                 })).concat([
-                    {label: "Inservice", value: "INSERVICE"},
-                    {label: "Decommissioned", value: "DECOMMISSIONED"},
-                    {label: "Decommissioning", value: "DECOMMISSIONING"},
-                    {label: "RS Decommissioned", value: "RS_DECOMMISSIONED"},
-                    {label: "Maintenance Mode On", value: "ON"},
-                    {label: "Maintenance Mode Off", value: "OFF"}
+                    "Inservice",
+                    "Decommissioned",
+                    "Decommissioning",
+                    "RS Decommissioned",
+                    "Maintenance Mode On",
+                    "Maintenance Mode Off"
                 ]);
+                map['Inservice'] = 'INSERVICE';
+                map['Decommissioned'] = 'DECOMMISSIONED';
+                map['Decommissioning'] = 'DECOMMISSIONING';
+                map['RS Decommissioned'] = 'RS_DECOMMISSIONED';
+                map['Maintenance Mode On'] = 'ON';
+                map['Maintenance Mode Off'] = 'OFF';
               }
               callback(list, {preserveOrder: true});
               break;
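
Every valueMatches branch above now follows the same pattern: hand the VisualSearch widget plain display labels, and record each label's internal value in the shared map as a side effect. A minimal Python sketch of that pattern (the status pairs are sample data):

    # Sketch: expose display labels to the search widget while recording
    # label -> internal value in a shared map, as the patched valueMatches does.
    label_value_map = {}

    def component_state_suggestions(statuses):
        suggestions = ['All']
        label_value_map['All'] = 'ALL'
        for value, label in statuses:
            label_value_map[label] = value
            suggestions.append(label)
        return suggestions

    print(component_state_suggestions([('STARTED', 'Started'),
                                       ('DECOMMISSIONED', 'Decommissioned')]))
    print(label_value_map)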

http://git-wip-us.apache.org/repos/asf/ambari/blob/c0d07416/ambari-web/test/controllers/main/host_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/host_test.js b/ambari-web/test/controllers/main/host_test.js
index 06a4813..e2041f3 100644
--- a/ambari-web/test/controllers/main/host_test.js
+++ b/ambari-web/test/controllers/main/host_test.js
@@ -370,9 +370,9 @@ describe('MainHostController', function () {
         componentName: 'C1'
       }));
       expect(App.db.setFilterConditions.calledWith('ctrl1', [{
-        iColumn: 6,
-        value: ['C1'],
-        type: 'multiple'
+        iColumn: 15,
+        value: 'C1:ALL',
+        type: 'string'
       }])).to.be.true;
     });
   });
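
The updated expectation reflects the new storage format: a component-state facet is saved as a single 'COMPONENT:STATE' string in one column rather than as a multiple-value filter. A small Python sketch of composing and splitting that format (the column index is illustrative, mirroring iColumn in the test):

    # Sketch: component-state filters persist as one 'COMPONENT:STATE' string.
    COMPONENT_STATE_COLUMN = 15

    def make_component_state_filter(component, state='ALL'):
        return {'iColumn': COMPONENT_STATE_COLUMN,
                'value': '%s:%s' % (component, state),
                'type': 'string'}

    def split_component_state(filter_value):
        component, state = filter_value.split(':', 1)
        return component, state

    f = make_component_state_filter('C1')
    assert split_component_state(f['value']) == ('C1', 'ALL')
    print(f)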


[19/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
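
The hunks that follow are a mechanical rename across the Python test harness: the executeScript keyword argument hdp_stack_version becomes the stack-agnostic stack_version, and further down get_hdp_version / is_hdp_stack_greater_or_equal become get_stack_version / is_stack_greater_or_equal. A minimal sketch of what the rename means for call sites (execute_script here is a stand-in, not the real RMFTestCase.executeScript signature):

    # Sketch: a stand-in harness entry point that accepts only the new keyword;
    # old call sites passing hdp_stack_version= would now raise a TypeError.
    def execute_script(path, classname, command, config_file, stack_version, target):
        print('running %s::%s (%s) against stack %s'
              % (classname, command, config_file, stack_version))

    execute_script('scripts/namenode.py', 'NameNode', 'configure',
                   'default.json', stack_version='2.0.6',
                   target='TARGET_COMMON_SERVICES')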
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index fc22d08..3655317 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -38,7 +38,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "configure",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -49,7 +49,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file = "altfs_plus_hdfs.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0,"")],
     )
@@ -149,7 +149,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "install",
                        config_file = "default_no_install.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        try_install=True
     )
@@ -162,7 +162,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0,"")],
     )
@@ -262,7 +262,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "stop",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode'",
@@ -276,7 +276,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "configure",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -288,7 +288,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0,"")],
     )
@@ -382,7 +382,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "stop",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop namenode'",
@@ -396,7 +396,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file = "ha_default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -492,7 +492,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file = "ha_default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = call_mocks
     )
@@ -593,7 +593,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file = "ha_secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -689,7 +689,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file="ha_bootstrap_active_node.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -795,7 +795,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file="ha_bootstrap_standby_node.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = call_mocks
     )
@@ -905,7 +905,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "start",
                        config_file="ha_bootstrap_standby_node_initial_start.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = call_mocks
     )
@@ -1009,7 +1009,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "decommission",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -1029,7 +1029,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "decommission",
                        config_file = "default_update_exclude_file_only.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -1045,7 +1045,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "decommission",
                        config_file = "ha_default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -1066,7 +1066,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "decommission",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/dfs.exclude',
@@ -1194,7 +1194,7 @@ class TestNamenode(RMFTestCase):
                          classname = "NameNode",
                          command = "rebalancehdfs",
                          config_file = "rebalancehdfs_default.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
       self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c 'export  PATH=/bin:/usr/bin ; hdfs --config /etc/hadoop/conf balancer -threshold -1'",
@@ -1212,7 +1212,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "rebalancehdfs",
                        config_file = "rebalancehdfs_secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks=[(1, "no kinit")]
     )
@@ -1245,7 +1245,7 @@ class TestNamenode(RMFTestCase):
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/namenode.py",
         classname = "NameNode", command = "start", config_file = "ranger-namenode-start.json",
-        hdp_stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES )
+        stack_version = self.STACK_VERSION, target = RMFTestCase.TARGET_COMMON_SERVICES )
 
       self.fail("Expected a failure since the ranger install.properties was missing")
     except Fail, failure:
@@ -1285,7 +1285,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -1305,7 +1305,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -1321,7 +1321,7 @@ class TestNamenode(RMFTestCase):
                          classname = "NameNode",
                          command = "security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -1342,7 +1342,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -1362,7 +1362,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -1382,7 +1382,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "restart",
                        config_file = "nn_ru_lzo.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     unknown_namenodes = active_namenodes
@@ -1392,7 +1392,7 @@ class TestNamenode(RMFTestCase):
                      classname = "NameNode",
                      command = "restart",
                      config_file = "nn_ru_lzo.json",
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertFalse(0 == len(Script.structuredOut))
@@ -1414,7 +1414,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "restart",
                        config_file = "nn_eu_standby.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict=mocks_dict)
@@ -1447,7 +1447,7 @@ class TestNamenode(RMFTestCase):
                        command = "start",
                        command_args=["nonrolling"],
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict=mocks_dict)
@@ -1469,7 +1469,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-namenode', version), sudo=True)
@@ -1488,7 +1488,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)
@@ -1503,7 +1503,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "post_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -report -live',
                               user='hdfs',
@@ -1520,7 +1520,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "post_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://ns1 -report -live',
                               user='hdfs',
@@ -1539,7 +1539,7 @@ class TestNamenode(RMFTestCase):
       classname = "NameNode",
       command = "prepare_rolling_upgrade",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, "Safe mode is OFF in c6401.ambari.apache.org")])
     
@@ -1564,7 +1564,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "prepare_rolling_upgrade",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, "Safe mode is OFF in c6401.ambari.apache.org")])
 
@@ -1595,7 +1595,7 @@ class TestNamenode(RMFTestCase):
       classname = "NameNode",
       command = "prepare_rolling_upgrade",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
     
     self.assertResourceCalled('Execute', 
@@ -1613,7 +1613,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "finalize_rolling_upgrade",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -rollingUpgrade query',
@@ -1638,7 +1638,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "finalize_rolling_upgrade",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Execute', 'hdfs dfsadmin -fs hdfs://ns1 -rollingUpgrade query',
@@ -1668,7 +1668,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
@@ -1694,7 +1694,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None), (0, None), (0, None), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)
@@ -1722,7 +1722,7 @@ class TestNamenode(RMFTestCase):
                        classname = "NameNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = itertools.cycle([(0, None)]),
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
index 36b67f0..ed03ae1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_nfsgateway.py
@@ -37,7 +37,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "configure",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -50,7 +50,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "start",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -86,7 +86,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "stop",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop nfs3',
@@ -104,7 +104,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "configure",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -117,7 +117,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "start",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -153,7 +153,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "stop",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop nfs3',
@@ -296,7 +296,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -316,7 +316,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -332,7 +332,7 @@ class TestNFSGateway(RMFTestCase):
                          classname = "NFSGateway",
                          command = "security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -353,7 +353,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -373,7 +373,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -393,7 +393,7 @@ class TestNFSGateway(RMFTestCase):
                        classname = "NFSGateway",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None), (0, None), (0, None)])
     self.assertResourceCalled('Link', ('/etc/hadoop/conf'), to='/usr/hdp/current/hadoop-client/conf')

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
index 851aee2..4c66c4f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
@@ -31,7 +31,7 @@ class TestServiceCheck(RMFTestCase):
                        classname = "HdfsServiceCheck",
                        command = "service_check",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -43,7 +43,7 @@ class TestServiceCheck(RMFTestCase):
                        classname = "HdfsServiceCheck",
                        command = "service_check",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
index fea5ecf..9e66e51 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_snamenode.py
@@ -30,7 +30,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "configure",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -46,7 +46,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "start",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -83,7 +83,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "stop",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode'",
@@ -98,7 +98,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "configure",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -114,7 +114,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "start",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -151,7 +151,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "stop",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop secondarynamenode'",
@@ -305,7 +305,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -325,7 +325,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -341,7 +341,7 @@ class TestSNamenode(RMFTestCase):
                          classname = "SNameNode",
                          command = "security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -362,7 +362,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -382,7 +382,7 @@ class TestSNamenode(RMFTestCase):
                        classname = "SNameNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
index 358b56f..72728a3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_zkfc.py
@@ -31,7 +31,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "start",
                        config_file = "ha_default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
@@ -111,7 +111,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "stop",
                        config_file = "ha_default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
@@ -126,7 +126,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "start",
                        config_file = "ha_secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
@@ -205,7 +205,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "stop",
                        config_file = "ha_secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop zkfc'",
@@ -220,7 +220,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "start",
                        config_file="ha_bootstrap_active_node.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
@@ -300,7 +300,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "start",
                        config_file="ha_bootstrap_standby_node.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/usr/lib/hadoop/lib/native/Linux-i386-32',
@@ -404,7 +404,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -426,7 +426,7 @@ class TestZkfc(RMFTestCase):
                            classname = "ZkfcSlave",
                            command = "security_status",
                            config_file="secured.json",
-                           hdp_stack_version = self.STACK_VERSION,
+                           stack_version = self.STACK_VERSION,
                            target = RMFTestCase.TARGET_COMMON_SERVICES
         )
     except:
@@ -439,7 +439,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -460,7 +460,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -470,7 +470,7 @@ class TestZkfc(RMFTestCase):
                        classname = "ZkfcSlave",
                        command = "security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
index 4dfa688..54d3e84 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
@@ -30,7 +30,7 @@ class TestHcatClient(RMFTestCase):
                        classname = "HCatClient",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hive/conf',
@@ -69,7 +69,7 @@ class TestHcatClient(RMFTestCase):
                          classname = "HCatClient",
                          command = "configure",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hive/conf',
@@ -117,7 +117,7 @@ class TestHcatClient(RMFTestCase):
       classname = "HCatClient",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
       mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
index f008745..77d6bdb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_client.py
@@ -30,7 +30,7 @@ class TestHiveClient(RMFTestCase):
                        classname = "HiveClient",
                        command = "configure",
                        config_file="default_client.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hive',
@@ -106,7 +106,7 @@ class TestHiveClient(RMFTestCase):
                        classname = "HiveClient",
                        command = "configure",
                        config_file="secured_client.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hive',
@@ -185,7 +185,7 @@ class TestHiveClient(RMFTestCase):
                        classname = "HiveClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True,)
@@ -206,7 +206,7 @@ class TestHiveClient(RMFTestCase):
                        classname = "HiveClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
index 1b68c8e..9b1032c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
@@ -32,7 +32,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -42,7 +42,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -67,7 +67,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -91,7 +91,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -102,7 +102,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -126,7 +126,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -363,11 +363,11 @@ class TestHiveMetastore(RMFTestCase):
                               )
 
   @patch("resource_management.core.shell.call")
-  @patch("resource_management.libraries.functions.get_hdp_version")
-  def test_start_ru(self, call_mock, get_hdp_version_mock):
+  @patch("resource_management.libraries.functions.get_stack_version")
+  def test_start_ru(self, call_mock, get_stack_version_mock):
     from ambari_commons.constants import UPGRADE_TYPE_ROLLING
 
-    get_hdp_version_mock.return_value = '2.3.0.0-1234'
+    get_stack_version_mock.return_value = '2.3.0.0-1234'
 
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
@@ -385,7 +385,7 @@ class TestHiveMetastore(RMFTestCase):
                        command = "start",
                        command_args = [UPGRADE_TYPE_ROLLING],
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory', '/etc/hive',

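The hunks above are purely mechanical: every executeScript call trades the HDP-specific
keyword hdp_stack_version for the stack-agnostic stack_version. A minimal sketch of the
new calling convention, assuming the RMFTestCase harness from ambari-server's Python test
utilities (names mirror the diff, not a documented public API):

    class TestHiveMetastore(RMFTestCase):
        def test_configure_default(self):
            # the keyword is now named for any stack, not just HDP
            self.executeScript(
                self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_metastore.py",
                classname="HiveMetastore",
                command="configure",
                config_file="default.json",
                stack_version=self.STACK_VERSION,  # formerly hdp_stack_version
                target=RMFTestCase.TARGET_COMMON_SERVICES)
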
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index 9929d00..5335605 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -31,7 +31,7 @@ from resource_management.libraries.functions import copy_tarball
 from resource_management.libraries import functions
 from resource_management.core.logger import Logger
 
-@patch.object(functions, "get_hdp_version", new = MagicMock(return_value="2.0.0.0-1234"))
+@patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.0.0.0-1234"))
 @patch("resource_management.libraries.functions.check_thrift_port_sasl", new=MagicMock())
 @patch("resource_management.libraries.functions.get_user_call_output.get_user_call_output", new=MagicMock(return_value=(0,'123','')))
 class TestHiveServer(RMFTestCase):
@@ -43,14 +43,14 @@ class TestHiveServer(RMFTestCase):
     Logger.logger = MagicMock()
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_configure_default(self, copy_to_hdfs_mock):
     copy_to_hdfs_mock.return_value = True
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -58,7 +58,7 @@ class TestHiveServer(RMFTestCase):
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   @patch("socket.socket")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_start_default(self, socket_mock, copy_to_hfds_mock):
     copy_to_hfds_mock.return_value = None
     s = socket_mock.return_value
@@ -66,7 +66,7 @@ class TestHiveServer(RMFTestCase):
                        classname="HiveServer",
                        command="start",
                        config_file="default.json",
-                       hdp_stack_version=self.STACK_VERSION,
+                       stack_version=self.STACK_VERSION,
                        target=RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -95,7 +95,7 @@ class TestHiveServer(RMFTestCase):
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   @patch("socket.socket")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_start_default_non_hdfs(self, socket_mock, copy_to_hfds_mock):
     copy_to_hfds_mock.return_value = None
     s = socket_mock.return_value
@@ -103,7 +103,7 @@ class TestHiveServer(RMFTestCase):
                        classname="HiveServer",
                        command="start",
                        config_file="default_hive_non_hdfs.json",
-                       hdp_stack_version=self.STACK_VERSION,
+                       stack_version=self.STACK_VERSION,
                        target=RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -125,13 +125,13 @@ class TestHiveServer(RMFTestCase):
     )
     self.assertNoMoreResources()
 
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_start_default_no_copy(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "start",
                        config_file="default_no_install.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -156,14 +156,14 @@ class TestHiveServer(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_start_default_alt_tmp(self, copy_to_hfds_mock):
     copy_to_hfds_mock.return_value = None
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "start",
                        config_file="default_hive_nn_ha.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -188,14 +188,14 @@ class TestHiveServer(RMFTestCase):
     self.assertNoMoreResources()
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_start_default_alt_nn_ha_tmp(self, copy_to_hfds_mock):
     copy_to_hfds_mock.return_value = None
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "start",
                        config_file="default_hive_nn_ha_2.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -219,13 +219,13 @@ class TestHiveServer(RMFTestCase):
     )
     self.assertNoMoreResources()
 
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_stop_default(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -245,13 +245,13 @@ class TestHiveServer(RMFTestCase):
     
     self.assertNoMoreResources()
 
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_configure_secured(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -260,7 +260,7 @@ class TestHiveServer(RMFTestCase):
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   @patch("hive_service.check_fs_root")
   @patch("socket.socket")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_start_secured(self, socket_mock, check_fs_root_mock, copy_to_hfds_mock):
     s = socket_mock.return_value
     copy_to_hfds_mock.return_value = None
@@ -269,7 +269,7 @@ class TestHiveServer(RMFTestCase):
                        classname = "HiveServer",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -298,13 +298,13 @@ class TestHiveServer(RMFTestCase):
 
 
   @patch("socket.socket")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
   def test_stop_secured(self, socket_mock):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
                        classname = "HiveServer",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute',
@@ -713,7 +713,7 @@ class TestHiveServer(RMFTestCase):
                            classname = "HiveServer",
                            command = "start",
                            config_file="default.json",
-                           hdp_stack_version = self.STACK_VERSION,
+                           stack_version = self.STACK_VERSION,
                            target = RMFTestCase.TARGET_COMMON_SERVICES
       )
       
@@ -722,7 +722,7 @@ class TestHiveServer(RMFTestCase):
       self.assert_configure_default()
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=True))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=True))
   @patch("os.path.exists", new = MagicMock(return_value=True))
   @patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
   def test_stop_during_upgrade(self, copy_to_hdfs_mock):
@@ -737,7 +737,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
 
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
      classname = "HiveServer", command = "restart", config_file = "hive-upgrade.json",
-     hdp_stack_version = self.UPGRADE_STACK_VERSION,
+     stack_version = self.UPGRADE_STACK_VERSION,
      target = RMFTestCase.TARGET_COMMON_SERVICES,
      call_mocks = call_side_effects
     )
@@ -749,7 +749,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
 
 
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=True))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=True))
   def test_stop_during_upgrade_with_default_conf_server(self, copy_to_hdfs_mock):
     hiveServerVersionOutput = """WARNING: Use "yarn jar" to launch YARN applications.
 Hive 1.2.1.2.3.0.0-2434
@@ -761,7 +761,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
 
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
      classname = "HiveServer", command = "restart", config_file = "hive-upgrade.json",
-     hdp_stack_version = self.UPGRADE_STACK_VERSION,
+     stack_version = self.UPGRADE_STACK_VERSION,
      target = RMFTestCase.TARGET_COMMON_SERVICES,
      call_mocks = call_side_effects
     )
@@ -775,7 +775,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
     try:
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hive_server.py",
        classname = "HiveServer", command = "restart", config_file = "hive-upgrade.json",
-       hdp_stack_version = self.UPGRADE_STACK_VERSION,
+       stack_version = self.UPGRADE_STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES,
        call_mocks = [(0,"BAD VERSION")])
       self.fail("Invalid hive version should have caused an exception")
@@ -823,7 +823,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                        classname = "HiveServer",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -847,7 +847,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                          classname = "HiveServer",
                          command = "security_status",
                          config_file="../../2.1/configs/secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -864,7 +864,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                        classname = "HiveServer",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -882,7 +882,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                        classname = "HiveServer",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -892,12 +892,12 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                        classname = "HiveServer",
                        command = "security_status",
                        config_file="../../2.1/configs/default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
 
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=True))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=True))
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_pre_upgrade_restart(self, copy_to_hdfs_mock):
     copy_to_hdfs_mock.return_value = True
@@ -911,7 +911,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                        classname = "HiveServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', version), sudo=True,)
@@ -933,7 +933,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
 
   @patch("os.path.exists")
   @patch("resource_management.core.shell.call")
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=True))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=True))
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_pre_upgrade_restart_23(self, copy_to_hdfs_mock, call_mock, os_path__exists_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
@@ -949,7 +949,7 @@ From source with checksum 150f554beae04f76f814f59549dead8b"""
                        classname = "HiveServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)

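Besides the keyword rename, test_hive_server.py re-targets its mock patches at the renamed
library helpers. A sketch of the pattern, assuming the imports shown at the top of the diff
(mock, resource_management's functions module, and Script; RMFTestCase as above):

    from mock import MagicMock, patch
    from resource_management.libraries import functions
    from resource_management.libraries.script.script import Script

    # module-level helper, patched under its new stack-agnostic name
    @patch.object(functions, "get_stack_version",
                  new=MagicMock(return_value="2.0.0.0-1234"))
    # stack comparisons moved from is_hdp_stack_* to is_stack_*
    @patch.object(Script, "is_stack_greater_or_equal",
                  new=MagicMock(return_value=False))
    class TestHiveServer(RMFTestCase):
        pass  # test methods as in the diff above
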
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
index 589b437..5646b75 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
@@ -38,7 +38,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="HiveServiceCheck",
                         command="service_check",
                         config_file="default.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "! beeline -u 'jdbc:hive2://c6402.ambari.apache.org:10000/;transportMode=binary;auth=noSasl' -e '' 2>&1| awk '{print}'|grep -i -e 'Connection refused' -e 'Invalid URL'",
@@ -146,7 +146,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="HiveServiceCheck",
                         command="service_check",
                         config_file="secured.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa@EXAMPLE.COM; ',
@@ -264,7 +264,7 @@ class TestServiceCheck(RMFTestCase):
       classname="HiveServiceCheck",
       command="service_check",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Execute', "! beeline -u 'jdbc:hive2://c6402.ambari.apache.org:10010/;transportMode=binary' -e '' 2>&1| awk '{print}'|grep -i -e 'Connection refused' -e 'Invalid URL'",

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py
index 1155e9f..827f6f7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_mysql_server.py
@@ -32,7 +32,7 @@ class TestMySqlServer(RMFTestCase):
                        classname = "MysqlServer",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -43,7 +43,7 @@ class TestMySqlServer(RMFTestCase):
                        classname = "MysqlServer",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -59,7 +59,7 @@ class TestMySqlServer(RMFTestCase):
                        classname = "MysqlServer",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', ('service','mysql','stop'),
@@ -75,7 +75,7 @@ class TestMySqlServer(RMFTestCase):
                        classname = "MysqlServer",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -86,7 +86,7 @@ class TestMySqlServer(RMFTestCase):
                        classname = "MysqlServer",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -102,7 +102,7 @@ class TestMySqlServer(RMFTestCase):
                        classname = "MysqlServer",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -118,7 +118,7 @@ class TestMySqlServer(RMFTestCase):
                        classname = "MysqlServer",
                        command = "clean",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_clean_default()
@@ -129,7 +129,7 @@ class TestMySqlServer(RMFTestCase):
                        classname = "MysqlServer",
                        command = "clean",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_clean_secured()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
index 82cf177..6fe49dc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_webhcat_server.py
@@ -32,7 +32,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -43,7 +43,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -60,7 +60,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -88,7 +88,7 @@ class TestWebHCatServer(RMFTestCase):
                          classname = "WebHCatServer",
                          command = "configure",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
 
@@ -100,7 +100,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -117,7 +117,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -266,7 +266,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -291,7 +291,7 @@ class TestWebHCatServer(RMFTestCase):
                          classname = "WebHCatServer",
                          command = "security_status",
                          config_file="../../2.1/configs/secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -308,7 +308,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -327,7 +327,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -337,7 +337,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "security_status",
                        config_file="../../2.1/configs/default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -353,7 +353,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-webhcat', version), sudo=True,)
@@ -375,7 +375,7 @@ class TestWebHCatServer(RMFTestCase):
                        classname = "WebHCatServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)
@@ -419,7 +419,7 @@ class TestWebHCatServer(RMFTestCase):
       classname = "WebHCatServer",
       command = "configure",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None), (0, None)],
       mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
index aab9bb0..46cacf5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_client.py
@@ -31,7 +31,7 @@ class TestOozieClient(RMFTestCase):
                        classname = "OozieClient",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
@@ -97,7 +97,7 @@ class TestOozieClient(RMFTestCase):
                        classname = "OozieClient",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/oozie/conf',
@@ -169,7 +169,7 @@ class TestOozieClient(RMFTestCase):
                        classname = "OozieClient",
                        command = "configure",
                        config_dict=default_json,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/usr/hdp/current/oozie-client/conf',
@@ -241,7 +241,7 @@ class TestOozieClient(RMFTestCase):
                        classname = "OozieClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'oozie-client', version), sudo=True)
@@ -262,7 +262,7 @@ class TestOozieClient(RMFTestCase):
                        classname = "OozieClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)


[22/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
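
AMBARI-15329 applies one consistent rename map across the agent scripts, and the hunks that
follow are all instances of it. Collected from the diffs in this patch set:

    old (HDP-specific)                         new (stack-agnostic)
    hdp_select                                 stack_select
    format_hdp_stack_version                   format_stack_version
    get_hdp_version                            get_stack_version
    Script.is_hdp_stack_greater_or_equal       Script.is_stack_greater_or_equal
    Script.is_hdp_stack_less_than              Script.is_stack_less_than
    params.hdp_stack_version                   params.stack_version_formatted
    executeScript(hdp_stack_version=...)       executeScript(stack_version=...)
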
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
index d60e961..664cafa 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
@@ -24,8 +24,8 @@ from resource_management.core.exceptions import Fail
 from resource_management.core.resources.system import Execute
 from resource_management.core import shell
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.version import compare_versions
 
 
@@ -74,24 +74,24 @@ def post_upgrade_deregister():
   Execute(command, user=params.hive_user, path=hive_execute_path, tries=1 )
 
 
-def _get_hive_execute_path(hdp_stack_version):
+def _get_hive_execute_path(stack_version_formatted):
   """
   Returns the exact execute path to use for the given stack-version.
   This method does not return the "current" path
-  :param hdp_stack_version: Exact stack-version to use in the new path
+  :param stack_version_formatted: Exact stack-version to use in the new path
   :return: Hive execute path for the exact hdp stack-version
   """
   import params
 
   hive_execute_path = params.execute_path
-  formatted_stack_version = format_hdp_stack_version(hdp_stack_version)
+  formatted_stack_version = format_stack_version(stack_version_formatted)
   if formatted_stack_version and compare_versions(formatted_stack_version, "2.2") >= 0:
     # hive_bin
-    new_hive_bin = format('/usr/hdp/{hdp_stack_version}/hive/bin')
+    new_hive_bin = format('/usr/hdp/{stack_version_formatted}/hive/bin')
     if (os.pathsep + params.hive_bin) in hive_execute_path:
       hive_execute_path = hive_execute_path.replace(os.pathsep + params.hive_bin, os.pathsep + new_hive_bin)
     # hadoop_bin_dir
-    new_hadoop_bin = hdp_select.get_hadoop_dir_for_stack_version("bin", hdp_stack_version)
+    new_hadoop_bin = stack_select.get_hadoop_dir_for_stack_version("bin", stack_version_formatted)
     old_hadoop_bin = params.hadoop_bin_dir
     if new_hadoop_bin and len(new_hadoop_bin) > 0 and (os.pathsep + old_hadoop_bin) in hive_execute_path:
       hive_execute_path = hive_execute_path.replace(os.pathsep + old_hadoop_bin, os.pathsep + new_hadoop_bin)
@@ -117,7 +117,7 @@ def _get_current_hiveserver_version():
       source_version = params.current_version
     hive_execute_path = _get_hive_execute_path(source_version)
     version_hive_bin = params.hive_bin
-    formatted_source_version = format_hdp_stack_version(source_version)
+    formatted_source_version = format_stack_version(source_version)
     if formatted_source_version and compare_versions(formatted_source_version, "2.2") >= 0:
       version_hive_bin = format('/usr/hdp/{source_version}/hive/bin')
     command = format('{version_hive_bin}/hive --version')

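The PATH surgery in _get_hive_execute_path is easy to miss inside the rename noise. Reduced
to a self-contained illustration (swap_path_entry is a hypothetical helper, not part of the
commit), the substitution it performs is:

    import os

    def swap_path_entry(path, old_dir, new_dir):
        # replace one os.pathsep-delimited entry, leaving the rest of the path intact
        if (os.pathsep + old_dir) in path:
            path = path.replace(os.pathsep + old_dir, os.pathsep + new_dir)
        return path

    p = "/usr/bin" + os.pathsep + "/usr/hdp/2.2.0.0-1234/hive/bin"
    print(swap_path_entry(p, "/usr/hdp/2.2.0.0-1234/hive/bin",
                          "/usr/hdp/2.3.0.0-2557/hive/bin"))

The real function applies this twice: once for hive_bin and once for the hadoop bin
directory returned by stack_select.get_hadoop_dir_for_stack_version("bin", ...).
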
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 2531598..63ad482 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -32,7 +32,7 @@ from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.copy_tarball import STACK_VERSION_PATTERN
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
@@ -53,11 +53,11 @@ hostname = config["hostname"]
 
 # This is expected to be of the form #.#.#.#
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
-stack_is_hdp21 = Script.is_hdp_stack_less_than("2.2")
+stack_version_formatted_major = format_stack_version(stack_version_unformatted)
+stack_is_hdp21 = Script.is_stack_less_than("2.2")
 
 # this is not available on INSTALL action because hdp-select is not available
-hdp_stack_version = functions.get_hdp_version('hive-server2')
+stack_version_formatted = functions.get_stack_version('hive-server2')
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade.
 # It cannot be used during the initial Cluster Install because the version is not yet known.
@@ -109,7 +109,7 @@ webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
 
 # Starting from HDP2.3 drop should be executed with purge suffix
 purge_tables = "false"
-if Script.is_hdp_stack_greater_or_equal("2.3"):
+if Script.is_stack_greater_or_equal("2.3"):
   purge_tables = 'true'
 
   # this is NOT a typo.  HDP-2.3 configs for hcatalog/webhcat point to a
@@ -117,7 +117,7 @@ if Script.is_hdp_stack_greater_or_equal("2.3"):
   hcat_conf_dir = '/usr/hdp/current/hive-webhcat/etc/hcatalog'
   config_dir = '/usr/hdp/current/hive-webhcat/etc/webhcat'
 
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   hive_specific_configs_supported = True
 
   component_directory = status_params.component_directory
@@ -287,7 +287,7 @@ target = format("{hive_lib}/{jdbc_jar_name}")
 jars_in_hive_lib = format("{hive_lib}/*.jar")
 
 
-if Script.is_hdp_stack_less_than("2.2"):
+if Script.is_stack_less_than("2.2"):
   source_jdbc_file = target
 else:
   # normally, the JDBC driver would be referenced by /usr/hdp/current/.../foo.jar
@@ -304,7 +304,7 @@ start_metastore_path = format("{tmp_dir}/start_metastore_script")
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 
 if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
-  if Script.is_hdp_stack_less_than("2.2"):
+  if Script.is_stack_less_than("2.2"):
     hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
   else:
     hive_heapsize = config['configurations']['hive-env']['hive.heapsize']

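params_linux.py settles on a single gating idiom for stack-dependent defaults. Its shape,
assuming an Ambari agent environment where resource_management is importable (values taken
from the hunk above):

    from resource_management.libraries.script.script import Script

    # default for older stacks, overridden as the stack floor rises
    purge_tables = "false"
    if Script.is_stack_greater_or_equal("2.3"):
        # from HDP 2.3 on, DROP TABLE is issued with the PURGE suffix
        purge_tables = "true"
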
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
index 359604f..7c21b5f 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
@@ -26,7 +26,7 @@ config = Script.get_config()
 
 # This is expected to be of the form #.#.#.#
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 hdp_root = None
 hive_conf_dir = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
index 8c035db..d0924b9 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py
@@ -21,7 +21,7 @@ limitations under the License.
 from ambari_commons import OSCheck
 
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
@@ -72,7 +72,7 @@ else:
 
   # default configuration directories
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-  hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+  hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
   webhcat_conf_dir = '/etc/hive-webhcat/conf'
   hive_etc_dir_prefix = "/etc/hive"
   hive_conf_dir = "/etc/hive/conf"
@@ -82,13 +82,13 @@ else:
   hive_server_conf_dir = "/etc/hive/conf.server"
 
   # HDP 2.2+
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
+  if Script.is_stack_greater_or_equal("2.2"):
     webhcat_conf_dir = '/usr/hdp/current/hive-webhcat/conf'
     hive_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
     hive_client_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
 
   # HDP 2.3+
-  if Script.is_hdp_stack_greater_or_equal("2.3"):
+  if Script.is_stack_greater_or_equal("2.3"):
     # ranger is only compatible with this location on HDP 2.3+, not HDP 2.2
     hive_server_conf_dir = format("/usr/hdp/current/{component_directory}/conf/conf.server")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
index 401debc..b95f6f3 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat.py
@@ -90,7 +90,7 @@ def webhcat():
             )
 
   # if we're in an upgrade of a secure cluster, make sure hive-site and yarn-site are created
-  if Script.is_hdp_stack_greater_or_equal("2.3") and params.version:
+  if Script.is_stack_greater_or_equal("2.3") and params.version:
     XmlConfig("hive-site.xml",
       conf_dir = format("/usr/hdp/{version}/hive/conf"),
       configurations = params.config['configurations']['hive-site'],

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
index c12b168..b0a876d 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_server.py
@@ -20,7 +20,7 @@ Ambari Agent
 """
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -75,11 +75,11 @@ class WebHCatServerDefault(WebHCatServer):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       # webhcat has no conf, but uses hadoop home, so verify that regular hadoop conf is set
       conf_select.select(params.stack_name, "hive-hcatalog", params.version)
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hive-webhcat", params.version)
+      stack_select.select("hive-webhcat", params.version)
 
   def security_status(self, env):
     import status_params

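webhcat_server.py shows the canonical pre_upgrade_restart pairing after the rename:
conf_select repoints the configuration symlinks, then stack_select repoints the component
binaries. The hunk restated without diff markers, under the same assumptions as the
surrounding code (params.version is the target stack version):

    from resource_management.libraries.functions import conf_select, stack_select
    from resource_management.libraries.functions.version import compare_versions, format_stack_version

    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
        # webhcat has no conf of its own, so also select the regular hadoop conf
        conf_select.select(params.stack_name, "hive-hcatalog", params.version)
        conf_select.select(params.stack_name, "hadoop", params.version)
        stack_select.select("hive-webhcat", params.version)
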
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
index 2556741..43b318c 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka.py
@@ -20,7 +20,7 @@ limitations under the License.
 import collections
 import os
 
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
 from resource_management.libraries.resources.properties_file import PropertiesFile
 from resource_management.libraries.resources.template_config import TemplateConfig
 from resource_management.core.resources.system import Directory, Execute, File, Link
@@ -39,7 +39,7 @@ def kafka(upgrade_type=None):
     # This still has an issue of hostnames being alphabetically out-of-order for broker.id in HDP-2.2.
     # Starting in HDP 2.3, Kafka handles the generation of broker.id so Ambari doesn't have to.
 
-    effective_version = params.hdp_stack_version if upgrade_type is None else format_hdp_stack_version(params.version)
+    effective_version = params.stack_version_formatted if upgrade_type is None else format_stack_version(params.version)
     Logger.info(format("Effective stack version: {effective_version}"))
 
     if effective_version is not None and effective_version != "" and compare_versions(effective_version, '2.2.0.0') >= 0 and compare_versions(effective_version, '2.3.0.0') < 0:

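kafka.py derives an "effective" version so one code path serves both normal starts and
mid-upgrade restarts: params.stack_version_formatted ordinarily, and
format_stack_version(params.version) during an upgrade. The window check itself, as a
sketch (needs_generated_broker_id is a hypothetical name for the condition in the hunk
above):

    from resource_management.libraries.functions.version import compare_versions

    def needs_generated_broker_id(effective_version):
        # Ambari only generates broker.id for stacks in [2.2.0.0, 2.3.0.0);
        # from 2.3 on, Kafka assigns broker ids itself
        return bool(effective_version) \
            and compare_versions(effective_version, '2.2.0.0') >= 0 \
            and compare_versions(effective_version, '2.3.0.0') < 0
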
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka_broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka_broker.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka_broker.py
index 3f650bd..314d702 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka_broker.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/kafka_broker.py
@@ -20,9 +20,9 @@ from resource_management import Script
 from resource_management.core.logger import Logger
 from resource_management.core.resources.system import Execute, File, Directory
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import Direction
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.check_process_status import check_process_status
 from kafka import ensure_base_directories
@@ -48,22 +48,22 @@ class KafkaBroker(Script):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
-      hdp_select.select("kafka-broker", params.version)
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+      stack_select.select("kafka-broker", params.version)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.3.0.0') >= 0:
       conf_select.select(params.stack_name, "kafka", params.version)
 
     # This is extremely important since it should only be called if crossing the HDP 2.3.4.0 boundary. 
     if params.current_version and params.version and params.upgrade_direction:
       src_version = dst_version = None
       if params.upgrade_direction == Direction.UPGRADE:
-        src_version = format_hdp_stack_version(params.current_version)
-        dst_version = format_hdp_stack_version(params.version)
+        src_version = format_stack_version(params.current_version)
+        dst_version = format_stack_version(params.version)
       else:
         # These represent the original values during the UPGRADE direction
-        src_version = format_hdp_stack_version(params.version)
-        dst_version = format_hdp_stack_version(params.downgrade_from_version)
+        src_version = format_stack_version(params.version)
+        dst_version = format_stack_version(params.downgrade_from_version)
 
       if compare_versions(src_version, '2.3.4.0') < 0 and compare_versions(dst_version, '2.3.4.0') >= 0:
         # Calling the acl migration script requires the configs to be present.

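The acl-migration guard in kafka_broker.py fires only when a move crosses the 2.3.4.0
boundary. Reduced to the comparison alone (crosses_234 is a hypothetical name):

    from resource_management.libraries.functions.version import compare_versions, format_stack_version

    def crosses_234(src_version, dst_version):
        # true only if the move starts below 2.3.4.0 and ends at or above it
        src = format_stack_version(src_version)
        dst = format_stack_version(dst_version)
        return compare_versions(src, '2.3.4.0') < 0 \
            and compare_versions(dst, '2.3.4.0') >= 0

On a downgrade the script reconstructs the original upgrade's endpoints before comparing,
which is why the same predicate covers both directions.
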
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
index d629533..ba6857e 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
@@ -19,15 +19,15 @@ limitations under the License.
 """
 from resource_management.libraries.functions import format
 from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
 from resource_management.libraries.functions.default import default
 from utils import get_bare_principal
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
 from resource_management.libraries.functions.is_empty import is_empty
 import status_params
 from resource_management.core.logger import Logger
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import get_kinit_path
 
@@ -47,7 +47,7 @@ current_version = default("/hostLevelParams/current_version", None)
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 upgrade_direction = default("/commandParams/upgrade_direction", None)
 
 # When downgrading the 'version' and 'current_version' are both pointing to the downgrade-target version
@@ -69,7 +69,7 @@ kafka_user_nofile_limit = config['configurations']['kafka-env']['kafka_user_nofi
 kafka_user_nproc_limit = config['configurations']['kafka-env']['kafka_user_nproc_limit']
 
 # parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   kafka_home = '/usr/hdp/current/kafka-broker/'
   kafka_bin = kafka_home+'bin/kafka'
   conf_dir = "/usr/hdp/current/kafka-broker/config"
@@ -139,7 +139,7 @@ security_enabled = config['configurations']['cluster-env']['security_enabled']
 kafka_kerberos_enabled = ('security.inter.broker.protocol' in config['configurations']['kafka-broker'] and
                           config['configurations']['kafka-broker']['security.inter.broker.protocol'] == "PLAINTEXTSASL")
 
-if security_enabled and hdp_stack_version != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] and compare_versions(hdp_stack_version, '2.3') >= 0:
+if security_enabled and stack_version_formatted != "" and 'kafka_principal_name' in config['configurations']['kafka-env'] and compare_versions(stack_version_formatted, '2.3') >= 0:
     _hostname_lowercase = config['hostname'].lower()
     _kafka_principal_name = config['configurations']['kafka-env']['kafka_principal_name']
     kafka_jaas_principal = _kafka_principal_name.replace('_HOST',_hostname_lowercase)
@@ -248,7 +248,7 @@ if has_ranger_admin and is_supported_kafka_ranger:
   ssl_truststore_password = unicode(config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
   credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
 
-  hdp_version = get_hdp_version('kafka-broker')
+  hdp_version = get_stack_version('kafka-broker')
   setup_ranger_env_sh_source = format('/usr/hdp/{hdp_version}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
   setup_ranger_env_sh_target = format("{conf_dir}/kafka-ranger-env.sh")
 
@@ -264,7 +264,7 @@ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
 hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
 default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin") if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 

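The Kafka hunks above establish the pattern repeated across this whole series: format the raw stack version, then gate version-specific behavior with compare_versions. A minimal sketch of that gate, assuming a host where the renamed resource_management helpers are importable; the literal values are placeholders, not cluster data:

    from resource_management.libraries.functions.version import (
        format_stack_version, compare_versions)

    stack_version_unformatted = "2.3"  # placeholder for hostLevelParams/stack_version
    stack_version_formatted = format_stack_version(stack_version_unformatted)  # -> "2.3.0.0"

    # Guard against an empty/undetermined version before comparing,
    # exactly as the Kafka kerberos block above does.
    if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.3') >= 0:
        pass  # 2.3+ code path (e.g. the kafka_jaas_principal setup)
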
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox.py
index 5436e47..df10136 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox.py
@@ -66,7 +66,7 @@ def knox():
      content=InlineTemplate(params.admin_topology_template)
   )
 
-  if Script.is_hdp_stack_greater_or_equal_to(params.version_formatted, "2.3.8.0"):
+  if Script.is_stack_greater_or_equal_to(params.version_formatted, "2.3.8.0"):
       File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
          group=params.knox_group,
          owner=params.knox_user,
@@ -123,7 +123,7 @@ def knox():
          content=InlineTemplate(params.admin_topology_template)
     )
 
-    if Script.is_hdp_stack_greater_or_equal_to(params.version_formatted, "2.3.8.0"):
+    if Script.is_stack_greater_or_equal_to(params.version_formatted, "2.3.8.0"):
         File(os.path.join(params.knox_conf_dir, "topologies", "knoxsso.xml"),
             group=params.knox_group,
             owner=params.knox_user,

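The knoxsso.xml topology above is written only on stacks at or above 2.3.8.0; is_stack_greater_or_equal_to boils down to a dotted-version comparison on an already-formatted version. A standalone model of those comparison semantics (an illustration only, not Ambari's implementation):

    def compare_versions_model(v1, v2):
        # Compare dotted versions numerically, padding so "2.3" == "2.3.0.0".
        a = [int(x) for x in v1.split('.')]
        b = [int(x) for x in v2.split('.')]
        n = max(len(a), len(b))
        a += [0] * (n - len(a))
        b += [0] * (n - len(b))
        return (a > b) - (a < b)  # -1, 0, or 1

    assert compare_versions_model("2.3.8.0", "2.3.8.0") == 0
    assert compare_versions_model("2.4.0.0", "2.3.8.0") > 0
    assert compare_versions_model("2.3", "2.3.0.0") == 0
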
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
index 4285c6e..30b9a41 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/knox_gateway.py
@@ -22,12 +22,12 @@ import tarfile
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import conf_select, tar_archive
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import Direction
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, validate_security_config_properties, get_params_from_filesystem, \
@@ -112,7 +112,7 @@ class KnoxGatewayDefault(KnoxGateway):
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
 
       absolute_backup_dir = None
       if params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:
@@ -123,7 +123,7 @@ class KnoxGatewayDefault(KnoxGateway):
 
       # conf-select will change the symlink to the conf folder.
       conf_select.select(params.stack_name, "knox", params.version)
-      hdp_select.select("knox-server", params.version)
+      stack_select.select("knox-server", params.version)
 
       # Extract the tar of the old conf folder into the new conf directory
       if absolute_backup_dir is not None and params.upgrade_direction and params.upgrade_direction == Direction.UPGRADE:

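The gateway's pre_upgrade_restart above also shows the ordering used throughout this series: move the conf symlink first, then the binary symlink. A hedged skeleton of that step, with the calls and component names taken from the hunk and assuming an Ambari agent host:

    from resource_management.libraries.functions import conf_select, stack_select
    from resource_management.libraries.functions.version import (
        compare_versions, format_stack_version)

    def knox_pre_upgrade_sketch(stack_name, version):
        # No-op on pre-2.2 stacks, where the /usr/hdp/current symlinks do not exist.
        if not version or compare_versions(format_stack_version(version), '2.2.0.0') < 0:
            return
        conf_select.select(stack_name, "knox", version)  # re-point /etc/knox/conf
        stack_select.select("knox-server", version)      # re-point /usr/hdp/current/knox-server
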
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
index c42c123..297f77d 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
@@ -22,15 +22,15 @@ from resource_management.core.logger import Logger
 
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_port_from_url import get_port_from_url
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 from status_params import *
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import conf_select
 
 # server configurations
@@ -41,11 +41,11 @@ stack_name = default("/hostLevelParams/stack_name", None)
 upgrade_direction = default("/commandParams/upgrade_direction", None)
 version = default("/commandParams/version", None)
 # E.g., 2.3.2.0
-version_formatted = format_hdp_stack_version(version)
+version_formatted = format_stack_version(version)
 
 # E.g., 2.3
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 # This is the version whose state is CURRENT. During an RU, this is the source version.
 # DO NOT format it since we need the build number too.
@@ -59,7 +59,7 @@ knox_data_dir = '/var/lib/knox/data'
 # Important, it has to be strictly greater than 2.3.0.0!!!
 if stack_name and stack_name.upper() == "HDP":
   Logger.info(format("HDP version to use is {version_formatted}"))
-  if Script.is_hdp_stack_greater(version_formatted, "2.3.0.0"):
+  if Script.is_stack_greater(version_formatted, "2.3.0.0"):
     # This is the current version. In the case of a Rolling Upgrade, it will be the newer version.
     # In the case of a Downgrade, it will be the version downgrading to.
     # This is always going to be a symlink to /var/lib/knox/data_${version}
@@ -82,7 +82,7 @@ ldap_bin = '/usr/lib/knox/bin/ldap.sh'
 knox_client_bin = '/usr/lib/knox/bin/knoxcli.sh'
 
 # HDP 2.2+ parameters
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   knox_bin = '/usr/hdp/current/knox-server/bin/gateway.sh'
   knox_conf_dir = '/usr/hdp/current/knox-server/conf'
   ldap_bin = '/usr/hdp/current/knox-server/bin/ldap.sh'
@@ -96,7 +96,7 @@ knox_group = default("/configurations/knox-env/knox_group", "knox")
 mode = 0644
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
@@ -334,7 +334,7 @@ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
 hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
 default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin") if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
 
 import functools

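Note that params_linux.py formats two different versions: the command's target version (commandParams/version, e.g. 2.3.2.0, used for upgrade gates) and the cluster's declared stack line (hostLevelParams/stack_version, e.g. 2.3). A small illustration of the distinction, with made-up inputs:

    from resource_management.libraries.functions.version import format_stack_version

    version = "2.3.2.0"                # commandParams/version (made-up example)
    stack_version_unformatted = "2.3"  # hostLevelParams/stack_version (made-up example)

    version_formatted = format_stack_version(version)                          # "2.3.2.0"
    stack_version_formatted = format_stack_version(stack_version_unformatted)  # "2.3.0.0"
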
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/status_params.py
index cf47b63..b1a5ebc 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/status_params.py
@@ -30,7 +30,7 @@ if OSCheck.is_windows_family():
   knox_ldap_win_service_name = "ldap"
 else:
   knox_conf_dir = '/etc/knox/conf'
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
+  if Script.is_stack_greater_or_equal("2.2"):
     knox_conf_dir = '/usr/hdp/current/knox-server/conf'
   knox_pid_dir = config['configurations']['knox-env']['knox_pid_dir']
   knox_pid_file = format("{knox_pid_dir}/gateway.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
index 55c5060..8dd0cb2 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
@@ -27,7 +27,7 @@ from resource_management.core.exceptions import Fail
 from resource_management.libraries.functions import tar_archive
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import Direction
-from resource_management.libraries.functions.version import compare_versions,format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions,format_stack_version
 
 
 BACKUP_TEMP_DIR = "knox-upgrade-backup"
@@ -82,7 +82,7 @@ def _get_directory_mappings_during_upgrade():
   knox_data_dir = '/var/lib/knox/data'
 
   if params.stack_name and params.stack_name.upper() == "HDP" and \
-          compare_versions(format_hdp_stack_version(params.upgrade_from_version), "2.3.0.0") > 0:
+          compare_versions(format_stack_version(params.upgrade_from_version), "2.3.0.0") > 0:
     # Use the version that is being upgraded from.
     knox_data_dir = format('/usr/hdp/{upgrade_from_version}/knox/data')
 

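The strict inequality in _get_directory_mappings_during_upgrade matters: only stacks strictly newer than 2.3.0.0 keep Knox data under the versioned /usr/hdp path. Condensed from the hunk, with a made-up upgrade_from_version:

    from resource_management.libraries.functions import format
    from resource_management.libraries.functions.version import (
        compare_versions, format_stack_version)

    upgrade_from_version = "2.3.2.0"  # made-up example
    knox_data_dir = '/var/lib/knox/data'
    if compare_versions(format_stack_version(upgrade_from_version), "2.3.0.0") > 0:
        # format() resolves {upgrade_from_version} from the caller's scope
        knox_data_dir = format('/usr/hdp/{upgrade_from_version}/knox/data')
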
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/mahout_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/mahout_client.py b/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/mahout_client.py
index cc41f3a..38269cb 100644
--- a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/mahout_client.py
+++ b/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/mahout_client.py
@@ -20,7 +20,7 @@ Ambari Agent
 """
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import ClientComponentHasNoStatus
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.script import Script
 from mahout import mahout
@@ -38,7 +38,7 @@ class MahoutClient(Script):
     env.set_params(params)
 
     conf_select.select(params.stack_name, "mahout", params.version)
-    hdp_select.select("mahout-client", params.version )
+    stack_select.select("mahout-client", params.version )
 
 
   def install(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
index b1667a8..2c57e96 100644
--- a/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py
@@ -20,9 +20,9 @@ Ambari Agent
 """
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
@@ -35,7 +35,7 @@ stack_name = default("/hostLevelParams/stack_name", None)
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
@@ -48,8 +48,8 @@ mahout_user = config['configurations']['mahout-env']['mahout_user']
 yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
 
 #hadoop params
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
-hadoop_home = hdp_select.get_hadoop_dir("home")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_home = stack_select.get_hadoop_dir("home")
 
 # the configuration direction for HDFS/YARN/MapR is the hadoop config
 # directory, which is symlinked by hadoop-client only

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
index df9ecfe..81a227e 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie.py
@@ -146,7 +146,7 @@ def oozie(is_server=False):
       owner=params.oozie_user
     )
 
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
     File(format("{params.conf_dir}/adminusers.txt"),
       mode=0644,
       group=params.user_group,
@@ -318,7 +318,7 @@ def oozie_server_specific():
        mode = 0644,
   )
 
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
     # Create hive-site and tez-site configs for oozie
     Directory(params.hive_conf_dir,
         create_parents = True,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
index 2e29464..4fc50d2 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_client.py
@@ -21,7 +21,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 
 from oozie import oozie
 from oozie_service import oozie_service
@@ -53,12 +53,12 @@ class OozieClient(Script):
 
     # this function should not execute if the version can't be determined or
     # is not at least HDP 2.2.0.0
-    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
+    if not params.version or compare_versions(format_stack_version(params.version), '2.2.0.0') < 0:
       return
 
     Logger.info("Executing Oozie Client Stack Upgrade pre-restart")
     conf_select.select(params.stack_name, "oozie", params.version)
-    hdp_select.select("oozie-client", params.version)
+    stack_select.select("oozie-client", params.version)
 
   # We substitute some configs (oozie.authentication.kerberos.principal) before generation (see oozie.py and params.py).
   # This function returns changed configs (it's used for config generation before config download)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
index b87e453..030fb2d 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server.py
@@ -22,8 +22,8 @@ from resource_management.core import Logger
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import compare_versions
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions import format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions import default
 from resource_management.libraries.functions.constants import Direction
@@ -65,17 +65,17 @@ class OozieServer(Script):
 
     if upgrade_type is not None and params.upgrade_direction == Direction.UPGRADE and params.version is not None:
       Logger.info(format("Configuring Oozie during upgrade type: {upgrade_type}, direction: {params.upgrade_direction}, and version {params.version}"))
-      if compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      if compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
         # In order for the "/usr/hdp/current/oozie-<client/server>" point to the new version of
         # oozie, we need to create the symlinks both for server and client.
         # This is required as both need to be pointing to new installed oozie version.
 
         # Sets the symlink : eg: /usr/hdp/current/oozie-client -> /usr/hdp/2.3.x.y-<version>/oozie
-        hdp_select.select("oozie-client", params.version)
+        stack_select.select("oozie-client", params.version)
         # Sets the symlink : eg: /usr/hdp/current/oozie-server -> /usr/hdp/2.3.x.y-<version>/oozie
-        hdp_select.select("oozie-server", params.version)
+        stack_select.select("oozie-server", params.version)
 
-      if compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
+      if compare_versions(format_stack_version(params.version), '2.3.0.0') >= 0:
         conf_select.select(params.stack_name, "oozie", params.version)
 
     env.set_params(params)
@@ -187,16 +187,16 @@ class OozieServerDefault(OozieServer):
 
     # this function should not execute if the version can't be determined or
     # is not at least HDP 2.2.0.0
-    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
+    if not params.version or compare_versions(format_stack_version(params.version), '2.2.0.0') < 0:
       return
 
     Logger.info("Executing Oozie Server Stack Upgrade pre-restart")
 
     OozieUpgrade.backup_configuration()
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "oozie", params.version)
-      hdp_select.select("oozie-server", params.version)
+      stack_select.select("oozie-server", params.version)
 
     OozieUpgrade.restore_configuration()
     OozieUpgrade.prepare_libext_directory()

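Oozie is one of the few components where a single upgrade must move two binary symlinks, while the conf symlink machinery only applies from 2.3 onward. A condensed, hedged rendering of the hunk's logic, assuming the agent environment and a params-style stack_name/version:

    from resource_management.libraries.functions import conf_select, stack_select
    from resource_management.libraries.functions.version import (
        compare_versions, format_stack_version)

    def oozie_upgrade_symlinks_sketch(stack_name, version):
        target = format_stack_version(version)
        if compare_versions(target, '2.2.0.0') >= 0:
            # Server and client must both point at the new version,
            # since the server relies on client paths as well.
            stack_select.select("oozie-client", version)
            stack_select.select("oozie-server", version)
        if compare_versions(target, '2.3.0.0') >= 0:
            conf_select.select(stack_name, "oozie", version)
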
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
index f0ebd20..27e2766 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/oozie_server_upgrade.py
@@ -30,8 +30,8 @@ from resource_management.core.resources.system import File
 from resource_management.libraries.functions import Direction
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import compare_versions
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions import format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import format_stack_version
 from resource_management.libraries.functions import tar_archive
 from resource_management.libraries.script.script import Script
 
@@ -108,7 +108,7 @@ class OozieUpgrade(Script):
 
     # some versions of HDP don't need the lzo compression libraries
     target_version_needs_compression_libraries = compare_versions(
-      format_hdp_stack_version(params.version), '2.2.1.0') >= 0
+      format_stack_version(params.version), '2.2.1.0') >= 0
 
     # ensure the directory exists
     Directory(params.oozie_libext_dir, mode = 0777)
@@ -162,7 +162,7 @@ class OozieUpgrade(Script):
     oozie.download_database_library_if_needed()
 
     # get the upgrade version in the event that it's needed
-    upgrade_stack = hdp_select._get_upgrade_stack()
+    upgrade_stack = stack_select._get_upgrade_stack()
     if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
       raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
 
@@ -226,7 +226,7 @@ class OozieUpgrade(Script):
       command = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal_with_host}")
       Execute(command, user=params.oozie_user, logoutput=True)
 
-    upgrade_stack = hdp_select._get_upgrade_stack()
+    upgrade_stack = stack_select._get_upgrade_stack()
     if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
       raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
 
@@ -278,7 +278,7 @@ class OozieUpgrade(Script):
 
     params.HdfsResource(None, action = "execute")
 
-    upgrade_stack = hdp_select._get_upgrade_stack()
+    upgrade_stack = stack_select._get_upgrade_stack()
     if upgrade_stack is None or upgrade_stack[1] is None:
       raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
 

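Every caller of stack_select._get_upgrade_stack() above defends against a short or None-padded result; the function is expected to yield a (stack_name, stack_version) pair. The defensive pattern, extracted as a sketch:

    from resource_management.core.exceptions import Fail
    from resource_management.libraries.functions import stack_select

    upgrade_stack = stack_select._get_upgrade_stack()
    if upgrade_stack is None or len(upgrade_stack) < 2 or upgrade_stack[1] is None:
        raise Fail("Unable to determine the stack that is being upgraded to or downgraded to.")
    stack_name, stack_version = upgrade_stack[0], upgrade_stack[1]
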
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
index 072b127..0decbc2 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py
@@ -22,8 +22,8 @@ from ambari_commons.constants import AMBARI_SUDO_BINARY
 from ambari_commons.str_utils import cbool, cint
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import get_port_from_url
@@ -51,17 +51,17 @@ agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_sta
 agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
-hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
 
 #hadoop params
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   # something like 2.3.0.0-1234
   stack_version = None
-  upgrade_stack = hdp_select._get_upgrade_stack()
+  upgrade_stack = stack_select._get_upgrade_stack()
   if upgrade_stack is not None and len(upgrade_stack) == 2 and upgrade_stack[1] is not None:
     stack_version = upgrade_stack[1]
 
@@ -143,7 +143,7 @@ oozie_site = config['configurations']['oozie-site']
 # Need this for yarn.nodemanager.recovery.dir in yarn-site
 yarn_log_dir_prefix = config['configurations']['yarn-env']['yarn_log_dir_prefix']
 
-if security_enabled and Script.is_hdp_stack_less_than("2.2"):
+if security_enabled and Script.is_stack_less_than("2.2"):
   #older versions of oozie have problems when using _HOST in principal
   oozie_site = dict(config['configurations']['oozie-site'])
   oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = \
@@ -194,7 +194,7 @@ if https_port is not None:
 hdfs_site = config['configurations']['hdfs-site']
 fs_root = config['configurations']['core-site']['fs.defaultFS']
 
-if Script.is_hdp_stack_less_than("2.2"):
+if Script.is_stack_less_than("2.2"):
   put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
 # for newer
 else:

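The is_stack_less_than("2.2") branch above works around older Oozie releases that cannot expand _HOST in a principal themselves, so params_linux.py substitutes the concrete host before writing oozie-site. The continuation of that assignment is outside the hunk shown, so the following is a purely illustrative stand-in for that kind of rewrite (key from the hunk, values made up):

    oozie_site = {'oozie.service.HadoopAccessorService.kerberos.principal':
                  'oozie/_HOST@EXAMPLE.COM'}  # made-up value
    hostname = 'host1.example.com'            # made-up value
    oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] = \
        oozie_site['oozie.service.HadoopAccessorService.kerberos.principal'] \
        .replace('_HOST', hostname)
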
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
index d575bd1..954bb80 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/status_params.py
@@ -48,7 +48,7 @@ else:
   kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 
   conf_dir = "/etc/oozie/conf"
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
+  if Script.is_stack_greater_or_equal("2.2"):
     conf_dir = format("/usr/hdp/current/{component_directory}/conf")
 
   tmp_dir = Script.get_tmp_dir()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
index fc6fd81..ff41105 100644
--- a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py
@@ -22,8 +22,8 @@ Ambari Agent
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 
@@ -34,22 +34,22 @@ tmp_dir = Script.get_tmp_dir()
 stack_name = default("/hostLevelParams/stack_name", None)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
 
 # hadoop default parameters
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 pig_conf_dir = "/etc/pig/conf"
 hadoop_home = '/usr'
 pig_bin_dir = ""
 
 # hadoop parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   pig_conf_dir = "/usr/hdp/current/pig-client/conf"
-  hadoop_home = hdp_select.get_hadoop_dir("home")
+  hadoop_home = stack_select.get_hadoop_dir("home")
   pig_bin_dir = '/usr/hdp/current/pig-client/bin'
 
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
index 36c188e..304ca15 100644
--- a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
@@ -23,7 +23,7 @@ import sys
 import os
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from pig import pig
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -46,10 +46,10 @@ class PigClientLinux(PigClient):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "pig", params.version)
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-client", params.version) # includes pig-client
+      stack_select.select("hadoop-client", params.version) # includes pig-client
 
   def install(self, env):
     self.install_packages(env)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
index fc819b8..155e63c 100644
--- a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
@@ -84,7 +84,7 @@ class PigServiceCheckLinux(PigServiceCheck):
       bin_dir = params.hadoop_bin_dir
     )
 
-    if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+    if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
       # cleanup results from previous test
       params.HdfsResource(output_dir,
                           type="directory",

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
index 6b6bf28..e5b54cd 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/params.py
@@ -19,7 +19,7 @@ limitations under the License.
 """
 import os
 from resource_management.libraries.script import Script
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.is_empty import is_empty
@@ -43,7 +43,7 @@ version = default("/commandParams/version", None)
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 upgrade_marker_file = format("{tmp_dir}/rangeradmin_ru.inprogress")
 
@@ -51,8 +51,8 @@ xml_configurations_supported = config['configurations']['ranger-env']['xml_confi
 
 create_db_dbuser = config['configurations']['ranger-env']['create_db_dbuser']
 
-stack_is_hdp22_or_further = Script.is_hdp_stack_greater_or_equal("2.2")
-stack_is_hdp23_or_further = Script.is_hdp_stack_greater_or_equal("2.3")
+stack_is_hdp22_or_further = Script.is_stack_greater_or_equal("2.2")
+stack_is_hdp23_or_further = Script.is_stack_greater_or_equal("2.3")
 
 downgrade_from_version = default("/commandParams/downgrade_from_version", None)
 upgrade_direction = default("/commandParams/upgrade_direction", None)
@@ -60,7 +60,7 @@ upgrade_direction = default("/commandParams/upgrade_direction", None)
 ranger_conf    = '/etc/ranger/admin/conf'
 ranger_ugsync_conf = '/etc/ranger/usersync/conf'
 
-if upgrade_direction == Direction.DOWNGRADE and compare_versions(format_hdp_stack_version(version),'2.3' ) < 0:
+if upgrade_direction == Direction.DOWNGRADE and compare_versions(format_stack_version(version),'2.3' ) < 0:
   stack_is_hdp22_or_further = True
   stack_is_hdp23_or_further = False
 

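The Ranger tail above handles an edge case: on a downgrade that lands below 2.3, flags computed from the current (higher) stack would be stale, so they are recomputed from the downgrade target. As a condensed model, using the same Direction constant and version helpers imported elsewhere in these scripts; the values are placeholders:

    from resource_management.libraries.functions.constants import Direction
    from resource_management.libraries.functions.version import (
        compare_versions, format_stack_version)

    upgrade_direction = Direction.DOWNGRADE  # placeholder
    version = "2.2.9.0"                      # downgrade target (placeholder)

    stack_is_hdp22_or_further = True
    stack_is_hdp23_or_further = True
    if upgrade_direction == Direction.DOWNGRADE and \
            compare_versions(format_stack_version(version), '2.3') < 0:
        stack_is_hdp22_or_further = True
        stack_is_hdp23_or_further = False
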
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
index f145ac5..07f3ab6 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/ranger_admin.py
@@ -17,7 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.script import Script
 from resource_management.core.resources.system import Execute
 from resource_management.core.exceptions import ComponentIsNotRunning
@@ -123,7 +123,7 @@ class RangerAdmin(Script):
     import params
     env.set_params(params)
 
-    upgrade_stack = hdp_select._get_upgrade_stack()
+    upgrade_stack = stack_select._get_upgrade_stack()
     if upgrade_stack is None:
       raise Fail('Unable to determine the stack and stack version')
 
@@ -139,7 +139,7 @@ class RangerAdmin(Script):
     import params
     env.set_params(params)
 
-    upgrade_stack = hdp_select._get_upgrade_stack()
+    upgrade_stack = stack_select._get_upgrade_stack()
     if upgrade_stack is None:
       raise Fail('Unable to determine the stack and stack version')
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
index 64549c3..ed8b690 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
@@ -20,7 +20,7 @@ limitations under the License.
 """
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.format import format
 
 def prestart(env, hdp_component):
@@ -28,4 +28,4 @@ def prestart(env, hdp_component):
 
   if params.version and params.stack_is_hdp22_or_further:
     conf_select.select(params.stack_name, hdp_component, params.version)
-    hdp_select.select(hdp_component, params.version)
+    stack_select.select(hdp_component, params.version)

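Both Ranger and Ranger KMS funnel their components through this one prestart helper; note the parameter is still named hdp_component even after the rename. A hedged usage sketch from a component script, where "ranger-admin" is an assumed component name for illustration:

    import upgrade  # the Ranger package script shown above

    def pre_upgrade_restart(self, env, upgrade_type=None):
        import params
        env.set_params(params)
        # Selects the conf and binary symlinks when the version/stack gates allow it.
        upgrade.prestart(env, "ranger-admin")
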
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py
index 95f0896..30eda0b 100755
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/params.py
@@ -19,7 +19,7 @@ limitations under the License.
 """
 import os
 from resource_management.libraries.script import Script
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.default import default
 
@@ -30,9 +30,9 @@ stack_name = default("/hostLevelParams/stack_name", None)
 version = default("/commandParams/version", None)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
-stack_is_hdp23_or_further = Script.is_hdp_stack_greater_or_equal("2.3")
+stack_is_hdp23_or_further = Script.is_stack_greater_or_equal("2.3")
 
 if stack_is_hdp23_or_further:
   kms_home = '/usr/hdp/current/ranger-kms'

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py
index 41885bb..798e8f7 100644
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py
@@ -19,7 +19,7 @@ limitations under the License.
 """
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.format import format
 
 def prestart(env, hdp_component):
@@ -27,4 +27,4 @@ def prestart(env, hdp_component):
 
   if params.version and params.stack_is_hdp23_or_further:
     conf_select.select(params.stack_name, hdp_component, params.version)
-    hdp_select.select(hdp_component, params.version)
+    stack_select.select(hdp_component, params.version)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params.py
index 7b6a490..cc08a6f 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params.py
@@ -20,7 +20,7 @@ limitations under the License.
 from ambari_commons.os_check import OSCheck
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
@@ -41,7 +41,7 @@ stack_name = default("/hostLevelParams/stack_name", None)
 version = default("/commandParams/version", None)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 #hadoop params
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
index 132ff77..b1cec11 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
@@ -18,7 +18,7 @@ limitations under the License.
 """
 from resource_management.libraries.resources import HdfsResource
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.default import default
@@ -31,7 +31,7 @@ slider_home_dir = '/usr/hdp/current/slider-client'
 
 #hadoop params
 slider_bin_dir = "/usr/lib/slider/bin"
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
     slider_bin_dir = format('{slider_home_dir}/bin')
 
 slider_conf_dir = format("{slider_home_dir}/conf")
@@ -52,7 +52,7 @@ hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 
 hdfs_site = config['configurations']['hdfs-site']

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
index 1aed032..b93b0eb 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
@@ -38,7 +38,7 @@ class SliderServiceCheck(Script):
     import params
     env.set_params(params)
     
-    if Script.is_hdp_stack_greater_or_equal("2.2"):
+    if Script.is_stack_greater_or_equal("2.2"):
       copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
     
     smokeuser_kinit_cmd = format(

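Before the smoke test runs, the stack's Slider tarball is pushed to HDFS, and only on 2.2+ stacks where that tarball layout exists. The call shape, as used in the hunk (params stands in for the service's params module, available only inside an Ambari script):

    from resource_management.libraries.script.script import Script
    from resource_management.libraries.functions.copy_tarball import copy_to_hdfs

    if Script.is_stack_greater_or_equal("2.2"):
        copy_to_hdfs("slider", params.user_group, params.hdfs_user,
                     host_sys_prepped=params.host_sys_prepped)
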
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py
index b487259..f090583 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py
@@ -81,7 +81,7 @@ def slider():
     File(format("{params.slider_conf_dir}/log4j.properties"),
          mode=0644
     )
-  if Script.is_hdp_stack_greater_or_equal("2.2"): 
+  if Script.is_stack_greater_or_equal("2.2"):
     File(params.slider_tar_gz,
          owner=params.hdfs_user,
          group=params.user_group,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
index 5865048..f584a12 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
@@ -20,7 +20,7 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from slider import slider
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -38,15 +38,15 @@ class SliderClientLinux(SliderClient):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "slider", params.version)
-      hdp_select.select("slider-client", params.version)
+      stack_select.select("slider-client", params.version)
 
       # also set all of the hadoop clients since slider client is upgraded as
       # part of the final "CLIENTS" group and we need to ensure that
       # hadoop-client is also set
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-client", params.version)
+      stack_select.select("hadoop-client", params.version)
 
   def install(self, env):
     self.install_packages(env)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
index 24b86e3..bc1d6ab 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/job_history_server.py
@@ -23,8 +23,8 @@ import os
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.core.logger import Logger
@@ -74,15 +74,15 @@ class JobHistoryServer(Script):
     import params
 
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       Logger.info("Executing Spark Job History Server Stack Upgrade pre-restart")
       conf_select.select(params.stack_name, "spark", params.version)
-      hdp_select.select("spark-historyserver", params.version)
+      stack_select.select("spark-historyserver", params.version)
 
       # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
       # need to copy the tarball, otherwise, copy it.
 
-      if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') < 0:
+      if params.version and compare_versions(format_stack_version(params.version), '2.3.0.0') < 0:
         resource_created = copy_to_hdfs(
           "tez",
           params.user_group,

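The history server shows a two-sided gate: symlinks move on any 2.2+ upgrade, but the Tez tarball copy is only needed below 2.3, where Spark still depended on Tez. A hedged condensation; the copy_to_hdfs argument list past params.user_group is cut off in the hunk, so the kwargs here mirror the Slider service check and are an assumption:

    from resource_management.libraries.functions import conf_select, stack_select
    from resource_management.libraries.functions.version import (
        compare_versions, format_stack_version)
    from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
    import params  # the Spark params module, resolvable only inside an Ambari script

    target = format_stack_version(params.version)
    if compare_versions(target, '2.2.0.0') >= 0:
        conf_select.select(params.stack_name, "spark", params.version)
        stack_select.select("spark-historyserver", params.version)
        if compare_versions(target, '2.3.0.0') < 0:
            # Assumed kwargs, mirroring the Slider service check above.
            copy_to_hdfs("tez", params.user_group, params.hdfs_user,
                         host_sys_prepped=params.host_sys_prepped)
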
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
index 7bf1f1c..843d8e7 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py
@@ -25,10 +25,10 @@ from setup_spark import *
 
 import resource_management.libraries.functions
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 
@@ -49,7 +49,7 @@ tmp_dir = Script.get_tmp_dir()
 
 stack_name = default("/hostLevelParams/stack_name", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
@@ -58,16 +58,16 @@ version = default("/commandParams/version", None)
 # TODO! FIXME! Version check is not working as of today :
 #   $ yum list installed | grep hdp-select
 #   hdp-select.noarch                            2.2.1.0-2340.el6           @HDP-2.2
-# And hdp_stack_version returned from hostLevelParams/stack_version is : 2.2.0.0
+# And stack_version_formatted returned from hostLevelParams/stack_version is : 2.2.0.0
 # Commenting out for time being
-#stack_is_hdp22_or_further = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2.1.0') >= 0
+#stack_is_hdp22_or_further = stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2.1.0') >= 0
 
 spark_conf = '/etc/spark/conf'
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 
-if Script.is_hdp_stack_greater_or_equal("2.2"):
-  hadoop_home = hdp_select.get_hadoop_dir("home")
+if Script.is_stack_greater_or_equal("2.2"):
+  hadoop_home = stack_select.get_hadoop_dir("home")
   spark_conf = format("/usr/hdp/current/{component_directory}/conf")
   spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
   spark_pid_dir = status_params.spark_pid_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
index 19565e7..b585f71 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/setup_spark.py
@@ -27,7 +27,7 @@ from resource_management.core.exceptions import ComponentIsNotRunning
 from resource_management.core.logger import Logger
 from resource_management.core import shell
 from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 
 def setup_spark(env, type, upgrade_type = None, action = None):
   import params
@@ -99,9 +99,9 @@ def setup_spark(env, type, upgrade_type = None, action = None):
       key_value_delimiter = " ",
     )
 
-  effective_version = params.version if upgrade_type is not None else params.hdp_stack_version
+  effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
   if effective_version:
-    effective_version = format_hdp_stack_version(effective_version)
+    effective_version = format_stack_version(effective_version)
 
   if params.spark_thrift_fairscheduler_content and effective_version and compare_versions(effective_version, '2.4.0.0') >= 0:
     # create spark-thrift-fairscheduler.xml

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py
index fe8cfc4..0d22908 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_client.py
@@ -21,8 +21,8 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.core.exceptions import ComponentIsNotRunning
 from resource_management.core.logger import Logger
 from resource_management.core import shell
@@ -50,10 +50,10 @@ class SparkClient(Script):
     import params
 
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       Logger.info("Executing Spark Client Stack Upgrade pre-restart")
       conf_select.select(params.stack_name, "spark", params.version)
-      hdp_select.select("spark-client", params.version)
+      stack_select.select("spark-client", params.version)
 
 if __name__ == "__main__":
   SparkClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py
index a428209..32103ae 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_service.py
@@ -25,16 +25,16 @@ from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import File, Execute
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 
 def spark_service(name, upgrade_type=None, action=None):
   import params
 
   if action == 'start':
 
-    effective_version = params.version if upgrade_type is not None else params.hdp_stack_version
+    effective_version = params.version if upgrade_type is not None else params.stack_version_formatted
     if effective_version:
-      effective_version = format_hdp_stack_version(effective_version)
+      effective_version = format_stack_version(effective_version)
 
     if effective_version and compare_versions(effective_version, '2.4.0.0') >= 0:
       # copy spark-hdp-assembly.jar to hdfs
@@ -56,7 +56,7 @@ def spark_service(name, upgrade_type=None, action=None):
 
     # Spark 1.3.1.2.3, and higher, which was included in HDP 2.3, does not have a dependency on Tez, so it does not
     # need to copy the tarball, otherwise, copy it.
-    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.3.0.0') < 0:
+    if params.stack_version_formatted and compare_versions(params.stack_version_formatted, '2.3.0.0') < 0:
       resource_created = copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
       if resource_created:
         params.HdfsResource(None, action="execute")
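
For reference, the renamed helpers keep the same contract as the old hdp_* ones:
normalize a version string, then compare it against a stack boundary. A minimal,
self-contained Python sketch of that pattern follows; parse() is a hypothetical
stand-in for format_stack_version, and tuple comparison stands in for
compare_versions from resource_management.

  # Sketch only -- not the resource_management implementation.
  def parse(version):
      # Hypothetical normalizer: "2.3.0.0-2557" -> (2, 3, 0, 0); build suffix dropped.
      return tuple(int(p) for p in version.split('-')[0].split('.'))

  def is_stack_at_least(version, boundary):
      # Mirrors: compare_versions(format_stack_version(version), boundary) >= 0
      return parse(version) >= parse(boundary)

  # Example: the Spark scripts above copy the Tez tarball only below 2.3.0.0.
  assert is_stack_at_least('2.3.0.0-2557', '2.3.0.0')
  assert not is_stack_at_least('2.2.9.0', '2.3.0.0')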


[35/51] [abbrv] ambari git commit: AMBARI-15223: Add ability to display messages on Upgrade UI in paragraphs (dili)

Posted by jl...@apache.org.
AMBARI-15223: Add ability to display messages on Upgrade UI in paragraphs (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/20de17a8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/20de17a8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/20de17a8

Branch: refs/heads/AMBARI-13364
Commit: 20de17a827f47ed9bf0d6b9b476032bb16c78eb1
Parents: 087fcff
Author: Di Li <di...@apache.org>
Authored: Wed Mar 9 11:45:29 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Wed Mar 9 11:45:29 2016 -0500

----------------------------------------------------------------------
 .../internal/UpgradeResourceProvider.java       | 14 ++++++--
 .../ambari/server/state/UpgradeHelper.java      |  9 ++++--
 .../state/stack/upgrade/ColocatedGrouping.java  | 34 ++++++++++++--------
 .../server/state/stack/upgrade/ManualTask.java  |  7 +++-
 .../UpgradeResourceProviderHDP22Test.java       |  2 +-
 .../internal/UpgradeResourceProviderTest.java   | 25 +++++++++++---
 .../ambari/server/state/UpgradeHelperTest.java  | 16 +++++----
 .../main/admin/stack_and_upgrade_controller.js  | 15 +++++++++
 .../stack_upgrade/stack_upgrade_wizard.hbs      |  5 +--
 9 files changed, 93 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
index 3a335ae..07061e1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
@@ -115,6 +115,8 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.Lists;
 import com.google.gson.Gson;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
 import com.google.inject.Inject;
 import com.google.inject.Provider;
 import com.google.inject.assistedinject.Assisted;
@@ -1468,20 +1470,28 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
 
     String itemDetail = entity.getText();
     String stageText = StringUtils.abbreviate(entity.getText(), 255);
-
     switch (task.getType()) {
       case MANUAL: {
         ManualTask mt = (ManualTask) task;
-        itemDetail = mt.message;
+        JsonArray messageArray = new JsonArray();
+        for(String message: mt.messages){
+          JsonObject messageObj = new JsonObject();
+          messageObj.addProperty("message", message);
+          messageArray.add(messageObj);
+        }
+        itemDetail = messageArray.toString();
         if (null != mt.summary) {
           stageText = mt.summary;
         }
+
         entity.setText(itemDetail);
 
         if (null != mt.structuredOut) {
           commandParams.put(COMMAND_PARAM_STRUCT_OUT, mt.structuredOut);
         }
 
+        //To be used later on by the Stage...
+        itemDetail = StringUtils.join(mt.messages, " ");
         break;
       }
       case CONFIGURE: {
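
For readers following the Gson loop above: each manual-task message becomes a
{"message": ...} object in a JSON array, and that array string is what gets
stored as the upgrade item's text (the HDP22 test further down asserts exactly
this shape). A hedged Python equivalent of the serialization:

  # Sketch of the serialization performed by the JsonArray loop above.
  import json

  def to_item_text(messages):
      return json.dumps([{"message": m} for m in messages], separators=(',', ':'))

  print(to_item_text(["Goo"]))  # -> [{"message":"Goo"}]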

http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
index 05d49c4..2ac4d25 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
@@ -508,9 +508,12 @@ public class UpgradeHelper {
 
           if (task.getType() == Type.MANUAL) {
             ManualTask mt = (ManualTask) task;
-            if (null != mt.message) {
-              mt.message = tokenReplace(ctx, mt.message,
-                  taskWrapper.getService(), taskWrapper.getComponent());
+            if(null != mt.messages && !mt.messages.isEmpty()){
+              for(int i = 0; i < mt.messages.size(); i++){
+                String message =  mt.messages.get(i);
+                message = tokenReplace(ctx, message, taskWrapper.getService(), taskWrapper.getComponent());
+                mt.messages.set(i, message);
+              }
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
index 3705c43..39ccb95 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
@@ -164,7 +164,9 @@ public class ColocatedGrouping extends Grouping {
 
         ManualTask task = new ManualTask();
         task.summary = m_batch.summary;
-        task.message = m_batch.message;
+        List<String> messages =  new ArrayList<String>();
+        messages.add(m_batch.message);
+        task.messages = messages;
         formatFirstBatch(upgradeContext, task, befores);
 
         StageWrapper wrapper = new StageWrapper(
@@ -264,21 +266,27 @@ public class ColocatedGrouping extends Grouping {
         }
       }
 
-      // !!! add the display names to the message, if needed
-      if (task.message.contains("{{components}}")) {
-        StringBuilder sb = new StringBuilder();
+      for(int i = 0; i < task.messages.size(); i++){
+        String message = task.messages.get(i);
+        // !!! add the display names to the message, if needed
+        if (message.contains("{{components}}")) {
+          StringBuilder sb = new StringBuilder();
 
-        List<String> compNames = new ArrayList<String>(names);
+          List<String> compNames = new ArrayList<String>(names);
 
-        if (compNames.size() == 1) {
-          sb.append(compNames.get(0));
-        } else if (names.size() > 1) {
-          String last = compNames.remove(compNames.size() - 1);
-          sb.append(StringUtils.join(compNames, ", "));
-          sb.append(" and ").append(last);
-        }
+          if (compNames.size() == 1) {
+            sb.append(compNames.get(0));
+          } else if (names.size() > 1) {
+            String last = compNames.remove(compNames.size() - 1);
+            sb.append(StringUtils.join(compNames, ", "));
+            sb.append(" and ").append(last);
+          }
 
-        task.message = task.message.replace("{{components}}", sb.toString());
+          message = message.replace("{{components}}", sb.toString());
+
+          //Add the updated message back to the message list.
+          task.messages.set(i, message);
+        }
       }
 
       // !!! build the structured out to attach to the manual task

http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java
index a0a347a..e56a602 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java
@@ -17,6 +17,8 @@
  */
 package org.apache.ambari.server.state.stack.upgrade;
 
+import java.util.List;
+
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
@@ -44,8 +46,11 @@ public class ManualTask extends ServerSideActionTask {
   @XmlTransient
   public String structuredOut = null;
 
+  /*@XmlElement(name="message")
+  public String message;*/
+
   @XmlElement(name="message")
-  public String message;
+  public List<String> messages;
 
   @Override
   public Task.Type getType() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
index 654fbd9..a9c4b17 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderHDP22Test.java
@@ -307,7 +307,7 @@ public class UpgradeResourceProviderHDP22Test {
     group = upgrade.getUpgradeGroups().get(0);
     assertEquals(2, group.getItems().size());
     UpgradeItemEntity item = group.getItems().get(1);
-    assertEquals("Value is set for the source stack upgrade pack", "Goo", item.getText());
+    assertEquals("Value is set for the source stack upgrade pack", "[{\"message\":\"Goo\"}]", item.getText());
 
     assertTrue(cluster.getDesiredConfigs().containsKey("hive-site"));
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
index 5866453..17c52d2 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
@@ -96,6 +96,9 @@ import org.junit.Ignore;
 import org.junit.Test;
 
 import com.google.gson.Gson;
+import com.google.gson.JsonArray;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
 import com.google.inject.Binder;
 import com.google.inject.Guice;
 import com.google.inject.Injector;
@@ -281,8 +284,8 @@ public class UpgradeResourceProviderTest {
 
     List<UpgradeItemEntity> preClusterUpgradeItems = preClusterGroup.getItems();
     assertEquals(2, preClusterUpgradeItems.size());
-    assertEquals("Foo", preClusterUpgradeItems.get(0).getText());
-    assertEquals("Foo", preClusterUpgradeItems.get(1).getText());
+    assertEquals("Foo", parseSingleMessage(preClusterUpgradeItems.get(0).getText()));
+    assertEquals("Foo", parseSingleMessage(preClusterUpgradeItems.get(1).getText()));
 
     UpgradeGroupEntity zookeeperGroup = upgradeGroups.get(1);
     assertEquals("ZOOKEEPER", zookeeperGroup.getName());
@@ -291,7 +294,7 @@ public class UpgradeResourceProviderTest {
     assertEquals(5, zookeeperUpgradeItems.size());
 
     assertEquals("This is a manual task with a placeholder of placeholder-rendered-properly",
-        zookeeperUpgradeItems.get(0).getText());
+        parseSingleMessage(zookeeperUpgradeItems.get(0).getText()));
     assertEquals("Restarting ZooKeeper Server on h1", zookeeperUpgradeItems.get(1).getText());
     assertEquals("Skipping Configuration Task 2.2.0", zookeeperUpgradeItems.get(2).getText());
     assertEquals("Service Check ZooKeeper", zookeeperUpgradeItems.get(3).getText());
@@ -306,7 +309,7 @@ public class UpgradeResourceProviderTest {
 
     List<UpgradeItemEntity> postClusterUpgradeItems = postClusterGroup.getItems();
     assertEquals(2, postClusterUpgradeItems.size());
-    assertEquals("Please confirm you are ready to finalize", postClusterUpgradeItems.get(0).getText());
+    assertEquals("Please confirm you are ready to finalize", parseSingleMessage(postClusterUpgradeItems.get(0).getText()));
     assertEquals("Save Cluster State", postClusterUpgradeItems.get(1).getText());
   }
 
@@ -492,7 +495,12 @@ public class UpgradeResourceProviderTest {
     res = resources.iterator().next();
 
     assertEquals("Confirm Finalize", res.getPropertyValue("UpgradeItem/context"));
-    assertTrue(res.getPropertyValue("UpgradeItem/text").toString().startsWith("Please confirm"));
+    String msgStr = res.getPropertyValue("UpgradeItem/text").toString();
+    JsonParser parser = new JsonParser();
+    JsonArray msgArray = (JsonArray) parser.parse(msgStr);
+    JsonObject msg = (JsonObject) msgArray.get(0);
+
+    assertTrue(msg.get("message").getAsString().startsWith("Please confirm"));
   }
 
   /**
@@ -1319,6 +1327,13 @@ public class UpgradeResourceProviderTest {
     }
   }
 
+  private String parseSingleMessage(String msgStr){
+    JsonParser parser = new JsonParser();
+    JsonArray msgArray = (JsonArray) parser.parse(msgStr);
+    JsonObject msg = (JsonObject) msgArray.get(0);
+
+    return msg.get("message").getAsString();
+  }
 
   /**
    *

http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
index e443d9d..bd3453e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/UpgradeHelperTest.java
@@ -222,7 +222,7 @@ public class UpgradeHelperTest {
     Task t = sw.getTasks().get(0).getTasks().get(0);
     assertEquals(ManualTask.class, t.getClass());
     ManualTask mt = (ManualTask) t;
-    assertTrue(mt.message.contains("DataNode and NodeManager"));
+    assertTrue(mt.messages.get(0).contains("DataNode and NodeManager"));
     assertNotNull(mt.structuredOut);
     assertTrue(mt.structuredOut.contains("DATANODE"));
     assertTrue(mt.structuredOut.contains("NODEMANAGER"));
@@ -611,9 +611,9 @@ public class UpgradeHelperTest {
     ManualTask manualTask = (ManualTask) zookeeperGroup.items.get(0).getTasks().get(
         0).getTasks().get(0);
 
-    assertEquals(
-        "This is a manual task with a placeholder of placeholder-rendered-properly",
-        manualTask.message);
+    assertEquals(1, manualTask.messages.size());
+    assertEquals("This is a manual task with a placeholder of placeholder-rendered-properly",
+        manualTask.messages.get(0));
   }
 
   @Test
@@ -1044,9 +1044,10 @@ public class UpgradeHelperTest {
     ManualTask manualTask = (ManualTask) zookeeperGroup.items.get(0).getTasks().get(
         0).getTasks().get(0);
 
+    assertEquals(1, manualTask.messages.size());
     assertEquals(
         "This is a manual task with a placeholder of placeholder-rendered-properly",
-        manualTask.message);
+        manualTask.messages.get(0));
   }
 
   @Test
@@ -1072,9 +1073,10 @@ public class UpgradeHelperTest {
     ManualTask manualTask = (ManualTask) zookeeperGroup.items.get(0).getTasks().get(
         0).getTasks().get(0);
 
+    assertEquals(1, manualTask.messages.size());
     assertEquals(
         "This is a manual task with a placeholder of placeholder-rendered-properly",
-        manualTask.message);
+        manualTask.messages.get(0));
   }
 
   @Test
@@ -1128,7 +1130,7 @@ public class UpgradeHelperTest {
     Task t = sw.getTasks().get(0).getTasks().get(0);
     assertEquals(ManualTask.class, t.getClass());
     ManualTask mt = (ManualTask) t;
-    assertTrue(mt.message.contains("DataNode and NodeManager"));
+    assertTrue(mt.messages.get(0).contains("DataNode and NodeManager"));
     assertNotNull(mt.structuredOut);
     assertTrue(mt.structuredOut.contains("DATANODE"));
     assertTrue(mt.structuredOut.contains("NODEMANAGER"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
index 847544f..2dceccc 100644
--- a/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
+++ b/ambari-web/app/controllers/main/admin/stack_and_upgrade_controller.js
@@ -413,6 +413,21 @@ App.MainAdminStackAndUpgradeController = Em.Controller.extend(App.LocalStorage,
         upgradeItems = [];
       newGroup.upgrade_items.forEach(function (item) {
         var oldItem = App.upgradeEntity.create({type: 'ITEM'}, item.UpgradeItem);
+        var status = oldItem.get('status');
+        if ('HOLDING' == status){
+          //manualItem
+          var text = oldItem.get('text');
+          try {
+            var messageArray = $.parseJSON(text)
+            var messages = [];
+            for(var i = 0; i < messageArray.length; i ++){
+              var aMessageObj = messageArray[i];
+              messages.push(aMessageObj.message);
+            }
+            oldItem.set('messages', messages)
+            oldItem.set('text', messages.join(' '))
+          } catch (err){}
+        }
         oldItem.set('tasks', []);
         upgradeItems.pushObject(oldItem);
       });
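
The controller change above is the consuming side of the same format: for a
HOLDING (manual) item it parses the text field as a JSON array of
{"message": ...} objects and falls back silently when parsing fails. A rough
Python mirror of that logic, assuming the same payload shape:

  # Sketch of the client-side parsing, with the same swallow-on-error fallback.
  import json

  def extract_messages(text):
      try:
          return [obj["message"] for obj in json.loads(text)]
      except (ValueError, TypeError, KeyError):
          return [text]  # unparseable text is kept as a single message

  assert extract_messages('[{"message":"a"},{"message":"b"}]') == ["a", "b"]
  assert extract_messages('plain text') == ["plain text"]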

http://git-wip-us.apache.org/repos/asf/ambari/blob/20de17a8/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
----------------------------------------------------------------------
diff --git a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
index 02022cf..f1189a3 100644
--- a/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
+++ b/ambari-web/app/templates/main/admin/stack_upgrade/stack_upgrade_wizard.hbs
@@ -97,8 +97,9 @@
         {{#if view.plainManualItem}}
           <div class="box details-box">
             <p class="manual-steps-title"><strong>{{t admin.stackUpgrade.dialog.manual}}</strong></p>
-            <p class="manual-steps-content">{{view.manualItem.text}}</p>
-
+            {{#each message in view.manualItem.messages}}
+              <p class="manual-steps-content">{{message}}</p>
+            {{/each}}
             <label class="message">
               {{view Em.Checkbox checkedBinding="view.isManualDone"}}
               {{t admin.stackUpgrade.dialog.manualDone}}



[44/51] [abbrv] ambari git commit: AMBARI-15340. Register Version: multiple UI tweaks (xiwang)

Posted by jl...@apache.org.
AMBARI-15340. Register Version: multiple UI tweaks (xiwang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e5d261f8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e5d261f8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e5d261f8

Branch: refs/heads/AMBARI-13364
Commit: e5d261f84c8f10d3276ece640659c7b53a6684d1
Parents: 6d3e291
Author: Xi Wang <xi...@apache.org>
Authored: Tue Mar 8 14:16:52 2016 -0800
Committer: Xi Wang <xi...@apache.org>
Committed: Wed Mar 9 14:53:26 2016 -0800

----------------------------------------------------------------------
 .../stackVersions/StackVersionsCreateCtrl.js            | 10 ++++++++--
 .../src/main/resources/ui/admin-web/app/styles/main.css | 11 +++++++++--
 .../app/views/stackVersions/stackVersionPage.html       | 12 ++++++------
 3 files changed, 23 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e5d261f8/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
index 038b197..46e4a11 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/stackVersions/StackVersionsCreateCtrl.js
@@ -39,7 +39,7 @@ angular.module('ambariAdminConsole')
   $scope.option1 = {
     index: 1,
     displayName: $t('versions.uploadFile'),
-    file: null,
+    file: '',
     hasError: false
   };
   $scope.option2 = {
@@ -64,7 +64,11 @@ angular.module('ambariAdminConsole')
     $scope.option2.hasError = false;
   };
   $scope.readInfoButtonDisabled = function () {
-    return $scope.option1.index == $scope.selectedOption.index ? !$scope.option1.file : !$scope.option2.url;
+    return $scope.option1.index == $scope.selectedOption.index ? false : !$scope.option2.url;
+  };
+
+  $scope.allInfoCategoriesBlank = function () {
+    return !$scope.upgradeStack.stack_name;
   };
 
   $scope.onFileSelect = function(e){
@@ -77,6 +81,8 @@ angular.module('ambariAdminConsole')
         };
       })(file);
       reader.readAsText(file);
+    } else {
+      $scope.option1.file = '';
     }
   };
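
Net effect of the controller tweaks above: the Read Version Info button is now
gated only by the URL option; choosing the file option never disables it. As a
quick predicate sketch (names are illustrative, not the Angular scope API):

  def read_info_button_disabled(selected_index, option1_index, option2_url):
      # option 1 (upload file): never disabled; option 2 (enter URL): needs a URL
      return False if selected_index == option1_index else not option2_url

  assert read_info_button_disabled(1, 1, '') is False
  assert read_info_button_disabled(2, 1, '') is True
  assert read_info_button_disabled(2, 1, 'http://repo.example') is False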
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e5d261f8/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css b/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
index e4db617..9348564 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/styles/main.css
@@ -1433,9 +1433,13 @@ thead.view-permission-header > tr > th {
 .left-menu-all-repos .repos-table .repos-td.active > a {
   color: white;
 }
-
+#upload-definition-file-panel {
+  background-color: #f5f5f5;
+  padding: 20px 10px 5px 10px;
+  margin-bottom: 15px;
+}
 .register-version-options .read-info-button {
-  margin-bottom: 10px;
+  margin-top: 10px;
 }
 
 .register-version-options .option-radio-button {
@@ -1471,6 +1475,9 @@ thead.view-permission-header > tr > th {
   text-align: center;
   cursor: pointer;
 }
+.register-version-form .repos-panel .repo-name-label {
+  text-align: left;
+}
 
 .register-version-form .repos-panel .os-type-label {
   margin-top: 27px;;

http://git-wip-us.apache.org/repos/asf/ambari/blob/e5d261f8/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
----------------------------------------------------------------------
diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
index c37006b..839b47d 100644
--- a/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
+++ b/ambari-admin/src/main/resources/ui/admin-web/app/views/stackVersions/stackVersionPage.html
@@ -40,10 +40,10 @@
       </label>
     </div>
     <div class="col-sm-7">
-      <input type="file" class="choose-file-input" onchange="angular.element(this).scope().onFileSelect(this)"/>
+      <input type="file" class="choose-file-input" ng-model="option1.file" onchange="angular.element(this).scope().onFileSelect(this)"/>
     </div>
   </div>
-  <div class="clearfix register-version-options border-bottom bottom-margin">
+  <div class="clearfix register-version-options bottom-margin">
     <div class="col-sm-5 option-radio-button">
       <label class="option-label">
         <input type="radio" ng-model="selectedOption.index" value="2" ng-change="toggleOptionSelect()"> {{'versions.enterURL' | translate}}
@@ -55,7 +55,7 @@
       </div>
     </div>
     <div class="col-sm-12 read-info-button">
-      <button class="btn btn-primary pull-right" ng-click="readVersionInfo()"
+      <button class="btn btn-primary pull-right" ng-model="button" ng-click="readVersionInfo()"
             ng-disabled="readInfoButtonDisabled()">{{'versions.readInfo' | translate}}</button>
     </div>
   </div>
@@ -84,7 +84,7 @@
   </div>
 </accordion>
 
-<form ng-class="{'col-sm-10': editController}" class="form-horizontal register-version-form" role="form" name="repoRegForm" novalidate>
+<form ng-class="{'col-sm-10': editController, 'visible': !allInfoCategoriesBlank()}" class="form-horizontal register-version-form hide-soft" role="form" name="repoRegForm" novalidate>
   <div class="panel panel-default details-panel">
     <div class="panel-heading">
       <h3 class="panel-title">{{'common.details' | translate}}</h3>
@@ -129,7 +129,7 @@
     <div class="panel-heading">
       <h3 class="panel-title">{{'versions.repos' | translate}}</h3>
     </div>
-    <div class="panel-body ">
+    <div class="panel-body">
       <div class="alert alert-info" role="alert">{{'versions.alerts.baseURLs' | translate}}</div>
       <div class="alert alert-warning hide-soft" ng-class="{'visible' : hasValidationErrors()}" role="alert">{{'versions.alerts.validationFailed' | translate}}</div>
       <div class="border-bottom bottom-margin clearfix">
@@ -147,7 +147,7 @@
             </div>
             <div class="col-sm-9">
               <div class="form-group {{repository.Repositories.repo_name}}" ng-class="{'has-error': repository.hasError }" ng-repeat="repository in os.repositories">
-                <div class="col-sm-3"><label class="control-label">{{repository.Repositories.repo_name}}</label></div>
+                <label class="repo-name-label control-label col-sm-3">{{repository.Repositories.repo_name}}</label>
                 <div class="col-sm-9"><input type="text" class="form-control" ng-model="repository.Repositories.base_url"
                                          ng-change="clearError()" ng-disabled="!os.selected"></div>
               </div>


[05/51] [abbrv] ambari git commit: AMBARI-15308 UI: ability to perform bulk add host components (Joe Wang via rzang)

Posted by jl...@apache.org.
AMBARI-15308 UI: ability to perform bulk add host components (Joe Wang via rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/bf186cfd
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/bf186cfd
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/bf186cfd

Branch: refs/heads/AMBARI-13364
Commit: bf186cfdc36264b7cdf3ab19a7fda32091c09591
Parents: 3dd4c4a
Author: Richard Zang <rz...@apache.org>
Authored: Mon Mar 7 14:29:19 2016 -0800
Committer: Richard Zang <rz...@apache.org>
Committed: Mon Mar 7 14:29:45 2016 -0800

----------------------------------------------------------------------
 .../main/host/bulk_operations_controller.js     | 22 ++++++++++++++------
 .../host/bulk_operations_controller_test.js     |  6 +++---
 2 files changed, 19 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/bf186cfd/ambari-web/app/controllers/main/host/bulk_operations_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/main/host/bulk_operations_controller.js b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
index 659d335..6714231 100644
--- a/ambari-web/app/controllers/main/host/bulk_operations_controller.js
+++ b/ambari-web/app/controllers/main/host/bulk_operations_controller.js
@@ -39,7 +39,7 @@ App.BulkOperationsController = Em.Controller.extend({
         this.bulkOperationForHostComponentsRestart(operationData, hosts);
       }
       else if (operationData.action === 'ADD') {
-        this.bulkOperationForHostComponentsAddConfirm(operationData, hosts);
+        this.bulkOperationForHostComponentsAdd(operationData, hosts);
       }
       else {
         if (operationData.action.indexOf('DECOMMISSION') == -1) {
@@ -310,16 +310,26 @@ App.BulkOperationsController = Em.Controller.extend({
   },
 
   /**
-   * Confirm bulk add for selected hostComponent
+   * bulk add for selected hostComponent
    * @param {Object} operationData - data about bulk operation (action, hostComponent etc)
    * @param {Array} hosts - list of affected hosts
    */
-  bulkOperationForHostComponentsAddConfirm: function (operationData, hosts) {
+  bulkOperationForHostComponentsAdd: function (operationData, hosts) {
+    var self = this;
+    return batchUtils.getComponentsFromServer({
+      components: [operationData.componentName],
+      hosts: hosts.mapProperty('hostName')
+    }, function (data) {
+      return self._getComponentsFromServerForHostComponentsAddCallback(operationData, data, hosts);
+    });
+  },
+
+  _getComponentsFromServerForHostComponentsAddCallback: function (operationData, data, hosts) {
     var self = this;
 
     hosts = hosts.mapProperty('hostName');
 
-    var allHostsWithComponent = App.HostComponent.find().filterProperty('componentName', operationData.componentName).mapProperty('hostName');
+    var allHostsWithComponent = data.items.mapProperty('Hosts.host_name');
     var hostsWithComponent = hosts.filter(function (host) {
       return allHostsWithComponent.contains(host);
     });
@@ -346,7 +356,7 @@ App.BulkOperationsController = Em.Controller.extend({
         },
 
         onPrimary: function() {
-          self.bulkOperationForHostComponentsAdd(operationData, hostsWithOutComponent);
+          self.bulkAddHostComponents(operationData, hostsWithOutComponent);
           this._super();
         },
         bodyClass: Em.View.extend({
@@ -387,7 +397,7 @@ App.BulkOperationsController = Em.Controller.extend({
    * @param {Object} operationData - data about bulk operation (action, hostComponent etc)
    * @param {Array} hostNames - list of affected hosts' names
    */
-  bulkOperationForHostComponentsAdd: function (operationData, hostNames) {
+  bulkAddHostComponents: function (operationData, hostNames) {
     var self= this;
     App.get('router.mainAdminKerberosController').getKDCSessionState(function () {
       App.ajax.send({
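
The key shift in this diff: the add path no longer trusts the client-side
App.HostComponent model and instead asks the server which hosts already run the
component, then partitions the selection. A small Python sketch of that
partitioning, assuming data.items carries Hosts.host_name as shown above:

  def partition_hosts(selected_hosts, server_items):
      # server_items mirrors data.items from getComponentsFromServer
      already = set(item["Hosts"]["host_name"] for item in server_items)
      with_component = [h for h in selected_hosts if h in already]
      without_component = [h for h in selected_hosts if h not in already]
      return with_component, without_component

  have, need = partition_hosts(["h1", "h2", "h3"], [{"Hosts": {"host_name": "h2"}}])
  assert have == ["h2"] and need == ["h1", "h3"]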

http://git-wip-us.apache.org/repos/asf/ambari/blob/bf186cfd/ambari-web/test/controllers/main/host/bulk_operations_controller_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/main/host/bulk_operations_controller_test.js b/ambari-web/test/controllers/main/host/bulk_operations_controller_test.js
index 178a6fd..7a8b270 100644
--- a/ambari-web/test/controllers/main/host/bulk_operations_controller_test.js
+++ b/ambari-web/test/controllers/main/host/bulk_operations_controller_test.js
@@ -32,7 +32,7 @@ describe('BulkOperationsController', function () {
       sinon.stub(hostController, 'bulkOperationForHostComponentsRestart', Em.K);
       sinon.stub(hostController, 'bulkOperationForHostComponentsDecommission', Em.K);
       sinon.stub(hostController, 'bulkOperationForHostComponents', Em.K);
-      sinon.stub(hostController, 'bulkOperationForHostComponentsAddConfirm', Em.K);
+      sinon.stub(hostController, 'bulkOperationForHostComponentsAdd', Em.K);
       sinon.stub(hostController, 'bulkOperationForHostsPassiveState', Em.K);
     });
 
@@ -43,7 +43,7 @@ describe('BulkOperationsController', function () {
       hostController.bulkOperationForHostComponentsRestart.restore();
       hostController.bulkOperationForHostComponentsDecommission.restore();
       hostController.bulkOperationForHostComponents.restore();
-      hostController.bulkOperationForHostComponentsAddConfirm.restore();
+      hostController.bulkOperationForHostComponentsAdd.restore();
       hostController.bulkOperationForHostsPassiveState.restore();
 
     });
@@ -121,7 +121,7 @@ describe('BulkOperationsController', function () {
         componentNameFormatted: 'DataNodes'
       };
       hostController.bulkOperation(operationData, []);
-      expect(hostController.bulkOperationForHostComponentsAddConfirm.calledOnce).to.equal(true);
+      expect(hostController.bulkOperationForHostComponentsAdd.calledOnce).to.equal(true);
     });
 
     it('DECOMMISSION for hostComponents', function() {


[03/51] [abbrv] ambari git commit: AMBARI-15323: Fix AbstractTimelineAggregatorTest intermittent unit test failure (avijayan)

Posted by jl...@apache.org.
AMBARI-15323: Fix AbstractTimelineAggregatorTest intermittent unit test failure (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/03eb1c51
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/03eb1c51
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/03eb1c51

Branch: refs/heads/AMBARI-13364
Commit: 03eb1c518b8d0fb82871aa83588e425f6f176c10
Parents: 037d933
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Mon Mar 7 13:25:10 2016 -0800
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Mon Mar 7 13:25:10 2016 -0800

----------------------------------------------------------------------
 .../AbstractTimelineAggregatorTest.java         | 22 +++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/03eb1c51/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregatorTest.java
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregatorTest.java b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregatorTest.java
index 8f7320b..21b9839 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregatorTest.java
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/metrics/timeline/aggregators/AbstractTimelineAggregatorTest.java
@@ -43,7 +43,7 @@ public class AbstractTimelineAggregatorTest {
 
   @Before
   public void setUp() throws Exception {
-    sleepIntervalMillis = 2*2*30000l; //2 minutes
+    sleepIntervalMillis = 5*60*1000l; //5 minutes
     checkpointCutOffMultiplier = 2;
 
     Configuration metricsConf = new Configuration();
@@ -112,10 +112,12 @@ public class AbstractTimelineAggregatorTest {
   public void testDoWorkOnZeroDelay() throws Exception {
 
     long currentTime = System.currentTimeMillis();
-    long roundedOffAggregatorTime = AbstractTimelineAggregator.getRoundedAggregateTimeMillis(currentTime);
-
+    long roundedOffAggregatorTime = AbstractTimelineAggregator.getRoundedCheckPointTimeMillis(currentTime,
+      sleepIntervalMillis);
+    
     //Test first run of aggregator with no checkpoint
-    agg.setLastAggregatedEndTime(roundedOffAggregatorTime);
+    checkPoint.set(-1);
+    agg.setLastAggregatedEndTime(-1l);
     agg.runOnce(sleepIntervalMillis);
     assertEquals("startTime should be zero", 0, startTimeInDoWork.get());
     assertEquals("endTime  should be zero", 0, endTimeInDoWork.get());
@@ -123,7 +125,9 @@ public class AbstractTimelineAggregatorTest {
     assertEquals("Do not aggregate on first run", 0, actualRuns);
 
     //Test first run with Too Old checkpoint
-    checkPoint.set(currentTime - 5*60*1000); //Old checkpoint
+    currentTime = System.currentTimeMillis();
+    checkPoint.set(currentTime - 16*60*1000); //Old checkpoint
+    agg.setLastAggregatedEndTime(-1l);
     agg.runOnce(sleepIntervalMillis);
     assertEquals("startTime should be zero", 0, startTimeInDoWork.get());
     assertEquals("endTime  should be zero", 0, endTimeInDoWork.get());
@@ -132,7 +136,7 @@ public class AbstractTimelineAggregatorTest {
 
     //Test first run with too "recent" checkpoint
     currentTime = System.currentTimeMillis();
-    checkPoint.set(currentTime - 30000);
+    checkPoint.set(currentTime);
     agg.setLastAggregatedEndTime(-1l);
     agg.setSleepIntervalMillis(sleepIntervalMillis);
     agg.runOnce(sleepIntervalMillis);
@@ -142,6 +146,9 @@ public class AbstractTimelineAggregatorTest {
     assertEquals("Do not aggregate on first run", 0, actualRuns);
 
     //Test first run with perfect checkpoint (sleepIntervalMillis back)
+    currentTime = System.currentTimeMillis();
+    roundedOffAggregatorTime = AbstractTimelineAggregator.getRoundedCheckPointTimeMillis(currentTime,
+      sleepIntervalMillis);
     long checkPointTime = roundedOffAggregatorTime - sleepIntervalMillis;
     long expectedCheckPoint = AbstractTimelineAggregator.getRoundedCheckPointTimeMillis(checkPointTime, sleepIntervalMillis);
     checkPoint.set(checkPointTime);
@@ -166,7 +173,8 @@ public class AbstractTimelineAggregatorTest {
       expectedCheckPoint + sleepIntervalMillis, endTimeInDoWork.get());
     assertEquals(expectedCheckPoint + sleepIntervalMillis,
       checkPoint.get());
-    assertEquals("Do not aggregate on first run", 2, actualRuns);
+    assertEquals("Aggregate on second run", 2, actualRuns);
+
 
  }
 }
\ No newline at end of file
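
The fix above keys the test's expected checkpoint off
getRoundedCheckPointTimeMillis rather than a hard-coded offset. Assuming that
helper floors a timestamp to a multiple of the interval (a plausible reading of
the assertions above, not quoted from the Java source), the arithmetic is:

  def rounded_checkpoint_millis(current_time_millis, interval_millis):
      # Floor to the nearest interval boundary.
      return (current_time_millis // interval_millis) * interval_millis

  five_minutes = 5 * 60 * 1000  # the new sleepIntervalMillis in setUp()
  assert rounded_checkpoint_millis(1000123, five_minutes) == 900000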


[09/51] [abbrv] ambari git commit: AMBARI-15314. Introduce possibility to retry stack installation in case of network instability (Dmytro Grinenko via alejandro)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
index f696543..274f29f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -29,4 +29,6 @@ def install_packages():
   packages = ['unzip', 'curl']
   if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
     packages.append('hdp-select')
-  Package(packages)
+  Package(packages,
+          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
+          retry_count=params.agent_stack_retry_count)
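
The two new Package arguments amount to "retry the install up to retry_count
times when the repository is unreachable". A generic retry loop with those
semantics, as a sketch only (resource_management's actual implementation and
its failure detection are not shown here; IOError and delay_seconds are
placeholders):

  import time

  def install_with_retry(install_fn, retry_count, retry_on_repo_unavailability,
                         delay_seconds=30):
      attempts = retry_count if retry_on_repo_unavailability else 1
      for attempt in range(1, attempts + 1):
          try:
              return install_fn()
          except IOError:  # placeholder for a repo-unavailable failure
              if attempt == attempts:
                  raise
              time.sleep(delay_seconds)

  # Usage example: succeeds on the third attempt.
  calls = []
  def flaky():
      calls.append(1)
      if len(calls) < 3:
          raise IOError("repo unreachable")
      return "installed"

  assert install_with_retry(flaky, retry_count=5,
                            retry_on_repo_unavailability=True,
                            delay_seconds=0) == "installed"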

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
index 6069ba6..59ae56e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
@@ -2030,7 +2030,7 @@ public class AmbariManagementControllerImplTest {
 
     Map<String, String> defaultHostParams = helper.createDefaultHostParams(cluster);
 
-    assertEquals(defaultHostParams.size(), 13);
+    assertEquals(defaultHostParams.size(), 15);
     assertEquals(defaultHostParams.get(DB_DRIVER_FILENAME), MYSQL_JAR);
     assertEquals(defaultHostParams.get(STACK_NAME), SOME_STACK_NAME);
     assertEquals(defaultHostParams.get(STACK_VERSION), SOME_STACK_VERSION);

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
index 69ed9d7..dea83a1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProviderTest.java
@@ -187,6 +187,7 @@ public class ClusterStackVersionResourceProviderTest {
     AmbariManagementController managementController = createMock(AmbariManagementController.class);
     Clusters clusters = createNiceMock(Clusters.class);
     Cluster cluster = createNiceMock(Cluster.class);
+    Map<String, String> hostLevelParams = new HashMap<>();
     StackId stackId = new StackId("HDP", "2.0.1");
 
     RepositoryVersionEntity repoVersion = new RepositoryVersionEntity();
@@ -288,6 +289,8 @@ public class ClusterStackVersionResourceProviderTest {
     expect(stage.getExecutionCommandWrapper(anyObject(String.class), anyObject(String.class))).
             andReturn(executionCommandWrapper).anyTimes();
 
+    expect(executionCommand.getHostLevelParams()).andReturn(hostLevelParams).anyTimes();
+
     Map<Role, Float> successFactors = new HashMap<>();
     expect(stage.getSuccessFactors()).andReturn(successFactors).atLeastOnce();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
index 4f9967f..478c052 100644
--- a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
+++ b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
@@ -110,14 +110,14 @@ class TestInstallPackages(RMFTestCase):
                               mirror_list=None,
                               append_to_file=True,
     )
-    self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"])
-    self.assertResourceCalled('Package', 'snappy', action=["upgrade"])
-    self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"])
-    self.assertResourceCalled('Package', 'lzo', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadooplzo_2_2_0_1_885', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885-libhdfs', action=["upgrade"])
-    self.assertResourceCalled('Package', 'ambari-log4j', action=["upgrade"])
+    self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'lzo', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadooplzo_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885-libhdfs', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'ambari-log4j', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
     self.assertNoMoreResources()
 
   @patch("ambari_commons.os_check.OSCheck.is_suse_family")
@@ -170,14 +170,14 @@ class TestInstallPackages(RMFTestCase):
                               mirror_list=None,
                               append_to_file=True,
                               )
-    self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"])
-    self.assertResourceCalled('Package', 'snappy', action=["upgrade"])
-    self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"])
-    self.assertResourceCalled('Package', 'lzo', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadooplzo_2_2_0_1_885', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885-libhdfs', action=["upgrade"])
-    self.assertResourceCalled('Package', 'ambari-log4j', action=["upgrade"])
+    self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'lzo', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadooplzo_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885-libhdfs', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'ambari-log4j', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
     self.assertNoMoreResources()
 
 
@@ -233,14 +233,14 @@ class TestInstallPackages(RMFTestCase):
                               mirror_list=None,
                               append_to_file=True,
     )
-    self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"])
-    self.assertResourceCalled('Package', 'snappy', action=["upgrade"])
-    self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"])
-    self.assertResourceCalled('Package', 'lzo', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadooplzo_2_2_0_1_885', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885-libhdfs', action=["upgrade"])
-    self.assertResourceCalled('Package', 'ambari-log4j', action=["upgrade"])
+    self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'lzo', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadooplzo_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885-libhdfs', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'ambari-log4j', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
     self.assertNoMoreResources()
 
 
@@ -367,14 +367,14 @@ class TestInstallPackages(RMFTestCase):
                               mirror_list=None,
                               append_to_file=True,
                               )
-    self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"])
-    self.assertResourceCalled('Package', 'snappy', action=["upgrade"])
-    self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"])
-    self.assertResourceCalled('Package', 'lzo', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadooplzo_2_2_0_1_885', action=["upgrade"])
-    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885-libhdfs', action=["upgrade"])
-    self.assertResourceCalled('Package', 'ambari-log4j', action=["upgrade"])
+    self.assertResourceCalled('Package', 'hdp-select', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'snappy', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'snappy-devel', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'lzo', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadooplzo_2_2_0_1_885', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'hadoop_2_2_0_1_885-libhdfs', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'ambari-log4j', action=["upgrade"], retry_count=5, retry_on_repo_unavailability=False)
     self.assertNoMoreResources()
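The assertions above all exercise one underlying change: the custom-action scripts now pass the agent's repository-retry settings through to every Package resource. A minimal sketch of that wiring, assuming the standard resource_management helpers (the local variable names are illustrative, not taken from the patch):

    # Sketch only: hostLevelParams values arrive as strings ("5", "false")
    # and have to be coerced before use.
    from resource_management.core.resources.packaging import Package
    from resource_management.libraries.functions.default import default

    retry_count = int(default("/hostLevelParams/agent_stack_retry_count", 5))
    retry_on_unavailability = \
        default("/hostLevelParams/agent_stack_retry_on_unavailability", "false").lower() == "true"

    # Mirrors what the test assertions above expect of the script under test.
    Package('hdp-select',
            action=["upgrade"],
            retry_count=retry_count,
            retry_on_repo_unavailability=retry_on_unavailability)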
 
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/custom_actions/configs/install_packages_config.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/configs/install_packages_config.json b/ambari-server/src/test/python/custom_actions/configs/install_packages_config.json
index 957a3d8..c6575b5 100644
--- a/ambari-server/src/test/python/custom_actions/configs/install_packages_config.json
+++ b/ambari-server/src/test/python/custom_actions/configs/install_packages_config.json
@@ -5,6 +5,8 @@
     "hostname": "0b3.vm", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://0b3.vm:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_67",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index d8cecec..719ae3e 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -38,7 +38,9 @@ class TestHBaseMaster(RMFTestCase):
                        try_install=True,
                        checked_call_mocks = [(0, "OK.", ""),(0, "OK.", "")],
     )
-    self.assertResourceCalled('Package', 'hbase_2_3_*',)
+    self.assertResourceCalled('Package', 'hbase_2_3_*',
+                              retry_count=5,
+                              retry_on_repo_unavailability=False)
 
     self.assertNoMoreResources()
   
@@ -57,8 +59,12 @@ class TestHBaseMaster(RMFTestCase):
                        try_install=True,
                        checked_call_mocks = [(0, "OK.", ""),(0, "OK.", "")],
     )
-    self.assertResourceCalled('Package', 'hbase_2_3_*',)
-    self.assertResourceCalled('Package', 'phoenix_2_3_*',)
+    self.assertResourceCalled('Package', 'hbase_2_3_*',
+                              retry_count=5,
+                              retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'phoenix_2_3_*',
+                              retry_count=5,
+                              retry_on_repo_unavailability=False)
 
     self.assertNoMoreResources()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 58ca7ef..9c846c8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -547,7 +547,7 @@ class TestHbaseRegionServer(RMFTestCase):
                               owner='hbase',
                               content='log4jproperties\nline2')
 
-    self.assertResourceCalled('Package', 'phoenix_2_2_*')
+    self.assertResourceCalled('Package', 'phoenix_2_2_*', retry_count=5, retry_on_repo_unavailability=False)
 
     self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-regionserver/bin/hbase-daemon.sh --config /usr/hdp/current/hbase-regionserver/conf start regionserver',
       not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-regionserver.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-regionserver.pid` >/dev/null 2>&1',

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
index 1a6c046..e45303d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/altfs_plus_hdfs.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
index 180aa46..d3f7f15 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
@@ -47,6 +47,8 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
index c2bb45c..92d2052 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
index 93504f7..590fb23 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default.json
@@ -3,6 +3,8 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "agentCacheDir": "/var/lib/ambari-agent/cache",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
@@ -1141,9 +1143,6 @@
         ],
         "metrics_collector_hosts": [
             "c6401.ambari.apache.org"
-        ],
-        "metrics_grafana_hosts": [
-            "c6401.ambari.apache.org"
         ]
     }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
index 6172a95..1918606 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
index 941a0ac..33270ce 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
index 0fd2de3..6f1aef1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_nn_ha_2.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
index cda8297..a0beca9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_hive_non_hdfs.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
index 0a4e2ab..f2ec21b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_no_install.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
index b13c997..cec5431 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_oozie_mysql.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
index bf2b638..3acc31b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/default_update_exclude_file_only.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
index 7a3f4e2..adb97c9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_active_node.json
@@ -4,6 +4,8 @@
     "hostname": "c6401.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
index 9223a41..40e18d2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node.json
@@ -4,6 +4,8 @@
     "hostname": "c6402.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
index b053993..b9e3b44 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_bootstrap_standby_node_initial_start.json
@@ -4,6 +4,8 @@
     "hostname": "c6402.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
index 4d12a52..a0a2ee0 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
@@ -4,6 +4,8 @@
     "hostname": "c6401.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
index 515bf2b..5799708 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
@@ -4,6 +4,8 @@
     "hostname": "c6401.ambari.apache.org", 
     "passiveInfo": [], 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
index 5f60fd1..209651d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-2.2.json
@@ -46,6 +46,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
index 6881456..bb9a3a2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-check-2.2.json
@@ -46,6 +46,8 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
index dc2864d..adac072 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-preupgrade.json
@@ -6,6 +6,8 @@
     "componentName": "", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
index 13c7952..fea5e34 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json
@@ -46,6 +46,8 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
index 24b8a3e..71af43a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase-rs-2.2.json
@@ -46,6 +46,8 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
index 8d3b60b..0b1be03 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_no_phx.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
index 84db160..47bb75f 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/hbase_with_phx.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
index 224062f..6160b6a 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu.json
@@ -36,6 +36,8 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "stack_name": "HDP", 
         "group_list": "[\"hadoop\",\"users\"]", 
         "host_sys_prepped": "false", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
index 5d1f50f..576288c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_eu_standby.json
@@ -36,6 +36,8 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "stack_name": "HDP", 
         "group_list": "[\"hadoop\",\"users\"]", 
         "host_sys_prepped": "false", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
index 9c8c0e2..5ae0ff2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/nn_ru_lzo.json
@@ -14,6 +14,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
index 81bcd2f..9e21d9b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/oozie_existing_sqla.json
@@ -3,6 +3,8 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json b/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
index 6e3342c..8a16d0c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/ranger-namenode-start.json
@@ -36,6 +36,8 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://hw10897.ix:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
index 5092c91..34ffede 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
@@ -3,6 +3,8 @@
     "clusterName": "pacan", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/lib/jvm/java-1.6.0-openjdk.x86_64",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
index daa46af..a868eeb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_secured.json
@@ -3,6 +3,8 @@
     "clusterName": "pacan", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/lib/jvm/java-1.6.0-openjdk.x86_64",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
index e17559d..d83ffc8 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred",
         "current_version" : "2.2.4.2-1234",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
index 11858f8..6b7dabc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
+++ b/ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
index f9d69a8..aacd1f2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
@@ -40,6 +40,6 @@ class TestHookBeforeInstall(RMFTestCase):
         repo_file_name='HDP',
         repo_template='[{{repo_id}}]\nname={{repo_id}}\n{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}\n\npath=/\nenabled=1\ngpgcheck=0'
     )
-    self.assertResourceCalled('Package', 'unzip',)
-    self.assertResourceCalled('Package', 'curl',)
+    self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'curl', retry_count=5, retry_on_repo_unavailability=False)
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
index 11e18ff..df13b43 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json
@@ -47,6 +47,8 @@
     "clusterName": "c1", 
     "hostname": "c6403.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://10.0.0.15:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.1/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/default.json b/ambari-server/src/test/python/stacks/2.1/configs/default.json
index 76d840b..f02c485 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/default.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
index 0fbc282..888666a 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/hive-metastore-upgrade.json
@@ -23,6 +23,8 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "stack_name": "HDP", 
         "group_list": "[\"hadoop\",\"users\"]", 
         "host_sys_prepped": "false", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.1/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/configs/secured.json b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
index 3346ee6..1b0a80c 100644
--- a/ambari-server/src/test/python/stacks/2.1/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.1/configs/secured.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.2/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/default.json b/ambari-server/src/test/python/stacks/2.2/configs/default.json
index 4159a58..319eb05 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/default.json
@@ -3,6 +3,8 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
index 9e597ef..d70ab3f 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/hive-upgrade.json
@@ -34,6 +34,8 @@
     "clusterName": "c1",
     "hostname": "c6402.ambari.apache.org",
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://hw10897.ix:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
index 494069c..9d2fd4a 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade-hdfs-secure.json
@@ -64,6 +64,8 @@
     "clusterName": "c1", 
     "hostname": "c6406.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6406.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_67",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
index 231c395..ab32b44 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/journalnode-upgrade.json
@@ -64,6 +64,8 @@
     "clusterName": "c1", 
     "hostname": "c6406.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6406.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_67",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
index 40a6e95..b9beed1 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/oozie-downgrade.json
@@ -31,6 +31,8 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://hw10897.ix:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json b/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
index 66089c8..1c5f4bc 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/oozie-upgrade.json
@@ -31,6 +31,8 @@
     "clusterName": "c1", 
     "hostname": "c6402.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.7.0_45",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.2/configs/secured.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.2/configs/secured.json b/ambari-server/src/test/python/stacks/2.2/configs/secured.json
index 11cf1eb..dbe7252 100644
--- a/ambari-server/src/test/python/stacks/2.2/configs/secured.json
+++ b/ambari-server/src/test/python/stacks/2.2/configs/secured.json
@@ -3,6 +3,8 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py b/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py
index aaee2a5..299638b 100644
--- a/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py
+++ b/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py
@@ -67,11 +67,21 @@ class TestPxf(RMFTestCase):
                        target=RMFTestCase.TARGET_COMMON_SERVICES,
                        try_install=True)
 
-    self.assertResourceCalled('Package', 'pxf-service',)
-    self.assertResourceCalled('Package', 'apache-tomcat',)
-    self.assertResourceCalled('Package', 'pxf-hive',)
-    self.assertResourceCalled('Package', 'pxf-hdfs',)
-    self.assertResourceCalled('Package', 'pxf-hbase',)
+    self.assertResourceCalled('Package', 'pxf-service',
+                              retry_count=5,
+                              retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'apache-tomcat',
+                              retry_count=5,
+                              retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'pxf-hive',
+                              retry_count=5,
+                              retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'pxf-hdfs',
+                              retry_count=5,
+                              retry_on_repo_unavailability=False)
+    self.assertResourceCalled('Package', 'pxf-hbase',
+                              retry_count=5,
+                              retry_on_repo_unavailability=False)
 
     self.assert_configure_default()
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json b/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
index e40c3c4..db157af 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/default.hbasedecom.json
@@ -3,6 +3,8 @@
     "clusterName": "c1", 
     "hostname": "c6401.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.3/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/default.json b/ambari-server/src/test/python/stacks/2.3/configs/default.json
index 4a72f08..253e833 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/default.json
@@ -3,6 +3,8 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json b/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
index bad20f3..f560704 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hbase_default.json
@@ -39,6 +39,8 @@
     "clusterName": "c1", 
     "hostname": "c6405.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6405.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.8.0_40", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json b/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
index 0e2a064..3a8133a 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/hbase_secure.json
@@ -44,6 +44,8 @@
     "clusterName": "c1", 
     "hostname": "c6405.ambari.apache.org", 
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6405.ambari.apache.org:8080/resources/", 
         "ambari_db_rca_password": "mapred", 
         "java_home": "/usr/jdk64/jdk1.8.0_40", 

http://git-wip-us.apache.org/repos/asf/ambari/blob/7e81d376/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json b/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json
index ba06162..1f6afe4 100644
--- a/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json
+++ b/ambari-server/src/test/python/stacks/2.3/configs/pxf_default.json
@@ -3,6 +3,8 @@
     "clusterName": "c1",
     "hostname": "c6401.ambari.apache.org",
     "hostLevelParams": {
+        "agent_stack_retry_count": "5",
+        "agent_stack_retry_on_unavailability": "false",
         "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
         "ambari_db_rca_password": "mapred",
         "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",


[16/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
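The hunks that follow are mechanical: the RMFTestCase.executeScript keyword hdp_stack_version is renamed to the stack-agnostic stack_version, with the passed value unchanged. A hypothetical transition shim (not part of this patch; _run_script stands in for the harness internals) shows how a harness could accept both spellings while callers migrate:

    # Hypothetical shim, for illustration only.
    def executeScript(self, path, stack_version=None, hdp_stack_version=None, **kwargs):
        if stack_version is None:
            stack_version = hdp_stack_version  # legacy keyword, kept only for migration
        return self._run_script(path, stack_version=stack_version, **kwargs)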
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqsegment.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqsegment.py b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqsegment.py
index b6c682f..2847405 100644
--- a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqsegment.py
+++ b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqsegment.py
@@ -79,7 +79,7 @@ class TestHawqSegment(RMFTestCase):
         classname = 'HawqSegment',
         command = 'configure',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -94,7 +94,7 @@ class TestHawqSegment(RMFTestCase):
         classname = 'HawqSegment',
         command = 'install',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -109,7 +109,7 @@ class TestHawqSegment(RMFTestCase):
         classname = 'HawqSegment',
         command = 'start',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -144,7 +144,7 @@ class TestHawqSegment(RMFTestCase):
         classname = 'HawqSegment',
         command = 'stop',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqstandby.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqstandby.py b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqstandby.py
index 7f4d3af..56a3f44 100644
--- a/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqstandby.py
+++ b/ambari-server/src/test/python/stacks/2.3/HAWQ/test_hawqstandby.py
@@ -117,7 +117,7 @@ class TestHawqStandby(RMFTestCase):
         classname = 'HawqStandby',
         command = 'configure',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -132,7 +132,7 @@ class TestHawqStandby(RMFTestCase):
         classname = 'HawqStandby',
         command = 'install',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -152,7 +152,7 @@ class TestHawqStandby(RMFTestCase):
         classname = 'HawqStandby',
         command = 'start',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 
@@ -185,7 +185,7 @@ class TestHawqStandby(RMFTestCase):
         classname = 'HawqStandby',
         command = 'stop',
         config_file ='hawq_default.json',
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES
         )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
index f0f6326..17be97f 100644
--- a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
+++ b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_client.py
@@ -35,7 +35,7 @@ class TestMahoutClient(RMFTestCase):
       classname = "MahoutClient",
       command = "configure",
       config_file = "default.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory',
@@ -73,7 +73,7 @@ class TestMahoutClient(RMFTestCase):
       classname = "MahoutClient",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'mahout-client', '2.2.1.0-3242'), sudo=True)
@@ -98,7 +98,7 @@ class TestMahoutClient(RMFTestCase):
       classname = "MahoutClient",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = itertools.cycle([(0, None, '')]),
       mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
index 878b7f8..78ae3fe 100644
--- a/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.3/MAHOUT/test_mahout_service_check.py
@@ -32,7 +32,7 @@ class TestMahoutClient(RMFTestCase):
                        classname = "MahoutServiceCheck",
                        command = "service_check",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py b/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py
index 299638b..1147a7e 100644
--- a/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py
+++ b/ambari-server/src/test/python/stacks/2.3/PXF/test_pxf.py
@@ -63,7 +63,7 @@ class TestPxf(RMFTestCase):
                        classname="Pxf",
                        command="install",
                        config_file="pxf_default.json",
-                       hdp_stack_version=self.STACK_VERSION,
+                       stack_version=self.STACK_VERSION,
                        target=RMFTestCase.TARGET_COMMON_SERVICES,
                        try_install=True)
 
@@ -91,7 +91,7 @@ class TestPxf(RMFTestCase):
                    classname="Pxf",
                    command="configure",
                    config_file="pxf_default.json",
-                   hdp_stack_version=self.STACK_VERSION,
+                   stack_version=self.STACK_VERSION,
                    target=RMFTestCase.TARGET_COMMON_SERVICES,
                    try_install=True)
 
@@ -103,7 +103,7 @@ class TestPxf(RMFTestCase):
                    classname="Pxf",
                    command="start",
                    config_file="pxf_default.json",
-                   hdp_stack_version=self.STACK_VERSION,
+                   stack_version=self.STACK_VERSION,
                    target=RMFTestCase.TARGET_COMMON_SERVICES,
                    try_install=True)
 
@@ -123,7 +123,7 @@ class TestPxf(RMFTestCase):
                    classname="Pxf",
                    command="stop",
                    config_file="pxf_default.json",
-                   hdp_stack_version=self.STACK_VERSION,
+                   stack_version=self.STACK_VERSION,
                    target=RMFTestCase.TARGET_COMMON_SERVICES,
                    try_install=True)
 
@@ -136,7 +136,7 @@ class TestPxf(RMFTestCase):
                    classname="Pxf",
                    command="status",
                    config_file="pxf_default.json",
-                   hdp_stack_version=self.STACK_VERSION,
+                   stack_version=self.STACK_VERSION,
                    target=RMFTestCase.TARGET_COMMON_SERVICES,
                    try_install=True)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
index f9c741d..c23fd96 100644
--- a/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
+++ b/ambari-server/src/test/python/stacks/2.3/SPARK/test_spark_thrift_server.py
@@ -24,7 +24,7 @@ from stacks.utils.RMFTestCase import *
 from only_for_platform import not_for_platform, PLATFORM_WINDOWS
 
 @not_for_platform(PLATFORM_WINDOWS)
-@patch("resource_management.libraries.functions.get_hdp_version", new=MagicMock(return_value="2.3.2.0-1597"))
+@patch("resource_management.libraries.functions.get_stack_version", new=MagicMock(return_value="2.3.2.0-1597"))
 class TestSparkThriftServer(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "SPARK/1.2.0.2.2/package"
   STACK_VERSION = "2.3"
@@ -36,7 +36,7 @@ class TestSparkThriftServer(RMFTestCase):
                    classname = "SparkThriftServer",
                    command = "configure",
                    config_file="spark_default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -49,7 +49,7 @@ class TestSparkThriftServer(RMFTestCase):
                    classname = "SparkThriftServer",
                    command = "start",
                    config_file="spark_default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -65,7 +65,7 @@ class TestSparkThriftServer(RMFTestCase):
                    classname = "SparkThriftServer",
                    command = "stop",
                    config_file="spark_default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/hdp/current/spark-client/sbin/stop-thriftserver.sh',
@@ -168,7 +168,7 @@ class TestSparkThriftServer(RMFTestCase):
                        classname = "SparkThriftServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/STORM/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/STORM/test_service_check.py b/ambari-server/src/test/python/stacks/2.3/STORM/test_service_check.py
index 81d7827..47101d2 100644
--- a/ambari-server/src/test/python/stacks/2.3/STORM/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.3/STORM/test_service_check.py
@@ -32,7 +32,7 @@ class TestStormServiceCheck(TestStormBase):
                        classname="ServiceCheck",
                        command="service_check",
                        config_file="storm_default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/STORM/test_storm_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/STORM/test_storm_upgrade.py b/ambari-server/src/test/python/stacks/2.3/STORM/test_storm_upgrade.py
index f53525c..d0356ff 100644
--- a/ambari-server/src/test/python/stacks/2.3/STORM/test_storm_upgrade.py
+++ b/ambari-server/src/test/python/stacks/2.3/STORM/test_storm_upgrade.py
@@ -41,7 +41,7 @@ class TestStormUpgrade(RMFTestCase):
       classname = "StormUpgrade",
       command = "delete_storm_zookeeper_data",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [],
       mocks_dict = mocks_dict)
@@ -73,7 +73,7 @@ class TestStormUpgrade(RMFTestCase):
       classname = "StormUpgrade",
       command = "delete_storm_zookeeper_data",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [],
       mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
index f826b55..9197cf1 100644
--- a/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
+++ b/ambari-server/src/test/python/stacks/2.3/YARN/test_ats_1_5.py
@@ -31,8 +31,8 @@ origin_exists = os.path.exists
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
 
-@patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
-@patch.object(functions, "get_hdp_version", new = MagicMock(return_value="2.0.0.0-1234"))
+@patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
+@patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.0.0.0-1234"))
 class TestAts(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
   STACK_VERSION = "2.3"
@@ -42,7 +42,7 @@ class TestAts(RMFTestCase):
                        classname="ApplicationTimelineServer",
                        command="configure",
                        config_file="ats_1_5.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
                        )
     self.assert_configure_default()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
index ca32649..380fa39 100644
--- a/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
+++ b/ambari-server/src/test/python/stacks/utils/RMFTestCase.py
@@ -61,7 +61,7 @@ class RMFTestCase(TestCase):
                     config_dict=None,
                     # common mocks for all the scripts
                     config_overrides = None,
-                    hdp_stack_version = None,
+                    stack_version = None,
                     checked_call_mocks = itertools.cycle([(0, "OK.")]),
                     call_mocks = itertools.cycle([(0, "OK.")]),
                     os_type=('Suse','11','Final'),
@@ -82,7 +82,7 @@ class RMFTestCase(TestCase):
       configs_path = os.path.join(src_dir, PATH_TO_CUSTOM_ACTION_TESTS, "configs")
     elif target == self.TARGET_COMMON_SERVICES:
       base_path = os.path.join(src_dir, PATH_TO_COMMON_SERVICES)
-      configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, hdp_stack_version, "configs")
+      configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, stack_version, "configs")
     else:
       raise RuntimeError("Wrong target value %s", target)
     script_path = os.path.join(base_path, norm_path)


[08/51] [abbrv] ambari git commit: AMBARI-15328 : Exception on manual start of AMS is misleading (avijayan)

Posted by jl...@apache.org.
AMBARI-15328 : Exception on manual start of AMS is misleading (avijayan)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/549e70e2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/549e70e2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/549e70e2

Branch: refs/heads/AMBARI-13364
Commit: 549e70e27097dc55ee081fbdfedac2d8bca11d83
Parents: bdba8cb
Author: Aravindan Vijayan <av...@hortonworks.com>
Authored: Mon Mar 7 17:18:32 2016 -0800
Committer: Aravindan Vijayan <av...@hortonworks.com>
Committed: Mon Mar 7 17:18:32 2016 -0800

----------------------------------------------------------------------
 .../conf/unix/ambari-metrics-collector                             | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/549e70e2/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
index 4fab821..64a7848 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
+++ b/ambari-metrics/ambari-metrics-timelineservice/conf/unix/ambari-metrics-collector
@@ -291,7 +291,7 @@ function start()
     # Wait until METRIC_* tables created
     for retry in {1..5}
     do
-      echo 'list' | ${HBASE_CMD} --config ${HBASE_CONF_DIR} shell | grep ^${METRIC_TABLES[0]} > /dev/null 2>&1
+      echo 'list' | ${HBASE_CMD} --config ${HBASE_CONF_DIR} shell 2> /dev/null | grep ^${METRIC_TABLES[0]} > /dev/null 2>&1
       if [ $? -eq 0 ]; then
         echo "$(date) Ambari Metrics data model initialization completed." | tee -a $STARTUPFILE
         break
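
The change above redirects the hbase shell's stderr to /dev/null inside the retry loop, so transient exceptions printed while HBase is still coming up no longer surface as a misleading error during a manual AMS start; the grep against stdout still decides whether the METRIC_* tables exist. The same pattern in Python, as a hedged sketch (the function and its arguments are illustrative, not part of the collector script):

    import os
    import subprocess

    def metric_table_ready(hbase_cmd, conf_dir, table):
        # Pipe 'list' into the hbase shell, keep stdout for the table check,
        # and discard stderr, which can carry scary-but-transient stack traces.
        with open(os.devnull, "w") as devnull:
            proc = subprocess.Popen([hbase_cmd, "--config", conf_dir, "shell"],
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=devnull,
                                    universal_newlines=True)
            out, _ = proc.communicate("list\n")
        return any(line.startswith(table) for line in out.splitlines())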


[39/51] [abbrv] ambari git commit: AMBARI-15333. Host check warning for User Issues shows incorrect message (alexantonenko)

Posted by jl...@apache.org.
AMBARI-15333. Host check warning for User Issues shows incorrect message (alexantonenko)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/eb6b0da8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/eb6b0da8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/eb6b0da8

Branch: refs/heads/AMBARI-13364
Commit: eb6b0da8eb11b14b053c822a6de147e0af6b9d8a
Parents: 28430f3
Author: Alex Antonenko <hi...@gmail.com>
Authored: Wed Mar 9 21:08:39 2016 +0200
Committer: Alex Antonenko <hi...@gmail.com>
Committed: Wed Mar 9 21:09:28 2016 +0200

----------------------------------------------------------------------
 ambari-web/app/controllers/wizard/step3_controller.js | 12 ++++++------
 ambari-web/test/controllers/wizard/step3_test.js      | 12 ++++++------
 2 files changed, 12 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
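
The agent's last_agent_env payload keys each existing user by name, but the wizard's step-3 controller was reading user.userName, so the "users" warning category rendered with an undefined name. The diffs below switch the controller and its tests to user.name. The grouping the controller performs, sketched here in Python for brevity (the helper and its dict shape are illustrative; only the field names come from the diffs):

    def collect_user_warnings(hosts):
        # hosts: [{"host_name": ..., "existingUsers": [{"name": ...}, ...]}, ...]
        warnings = {}
        for host in hosts:
            for user in host.get("existingUsers", []):
                key = user["name"]  # was user["userName"]; the payloads use "name"
                entry = warnings.setdefault(key, {"name": key, "hosts": [],
                                                  "category": "users"})
                entry["hosts"].append(host["host_name"])
                entry["onSingleHost"] = len(entry["hosts"]) == 1
        return warnings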


http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b0da8/ambari-web/app/controllers/wizard/step3_controller.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/controllers/wizard/step3_controller.js b/ambari-web/app/controllers/wizard/step3_controller.js
index 19ddd20..8e8eb11 100644
--- a/ambari-web/app/controllers/wizard/step3_controller.js
+++ b/ambari-web/app/controllers/wizard/step3_controller.js
@@ -1222,14 +1222,14 @@ App.WizardStep3Controller = Em.Controller.extend(App.ReloadPopupMixin, {
       var existingUsers = lastAgentEnvCheck.existingUsers;
       if (existingUsers) {
         existingUsers.forEach(function (user) {
-          warning = warningCategories.usersWarnings[user.userName];
+          warning = warningCategories.usersWarnings[user.name];
           if (warning) {
             warning.hosts.push(hostName);
             warning.hostsLong.push(hostName);
             warning.onSingleHost = false;
           } else {
-            warningCategories.usersWarnings[user.userName] = warning = {
-              name: user.userName,
+            warningCategories.usersWarnings[user.name] = warning = {
+              name: user.name,
               hosts: [hostName],
               hostsLong: [hostName],
               category: 'users',
@@ -1902,14 +1902,14 @@ App.WizardStep3Controller = Em.Controller.extend(App.ReloadPopupMixin, {
       //todo: to be removed after check in new API
       if (_host.Hosts.last_agent_env.existingUsers) {
         _host.Hosts.last_agent_env.existingUsers.forEach(function (user) {
-          warning = warningCategories.usersWarnings[user.userName];
+          warning = warningCategories.usersWarnings[user.name];
           if (warning) {
             warning.hosts.push(_host.Hosts.host_name);
             warning.hostsLong.push(_host.Hosts.host_name);
             warning.onSingleHost = false;
           } else {
-            warningCategories.usersWarnings[user.userName] = warning = {
-              name: user.userName,
+            warningCategories.usersWarnings[user.name] = warning = {
+              name: user.name,
               hosts: [_host.Hosts.host_name],
               hostsLong: [_host.Hosts.host_name],
               category: 'users',

http://git-wip-us.apache.org/repos/asf/ambari/blob/eb6b0da8/ambari-web/test/controllers/wizard/step3_test.js
----------------------------------------------------------------------
diff --git a/ambari-web/test/controllers/wizard/step3_test.js b/ambari-web/test/controllers/wizard/step3_test.js
index dbfb868..6cb7ee7 100644
--- a/ambari-web/test/controllers/wizard/step3_test.js
+++ b/ambari-web/test/controllers/wizard/step3_test.js
@@ -1425,7 +1425,7 @@ describe('App.WizardStep3Controller', function () {
             {
               tasks: [
                 {Tasks: {host_name: 'c1',
-                         structured_out: {last_agent_env_check: {existingUsers: [{userName: 'n1'}]}}
+                         structured_out: {last_agent_env_check: {existingUsers: [{name: 'n1'}]}}
                         }
                 }
               ],
@@ -1445,11 +1445,11 @@ describe('App.WizardStep3Controller', function () {
             {
               tasks: [
                 {Tasks: {host_name: 'c1',
-                         structured_out:{last_agent_env_check: {existingUsers: [{userName: 'n1'}]}}
+                         structured_out:{last_agent_env_check: {existingUsers: [{name: 'n1'}]}}
                         }
                 },
                 {Tasks: {host_name: 'c2',
-                         structured_out:{last_agent_env_check: {existingUsers: [{userName: 'n1'}]}}
+                         structured_out:{last_agent_env_check: {existingUsers: [{name: 'n1'}]}}
                         }
                 }
               ],
@@ -1910,7 +1910,7 @@ describe('App.WizardStep3Controller', function () {
             {
               items: [
                 {Hosts: {host_name: 'c1', last_agent_env: {existingUsers: [
-                  {userName: 'n1'}
+                  {name: 'n1'}
                 ]}}}
               ],
               m: 'not empty existingUsers',
@@ -1929,10 +1929,10 @@ describe('App.WizardStep3Controller', function () {
             {
               items: [
                 {Hosts: {host_name: 'c1', last_agent_env: {existingUsers: [
-                  {userName: 'n1'}
+                  {name: 'n1'}
                 ]}}},
                 {Hosts: {host_name: 'c2', last_agent_env: {existingUsers: [
-                  {userName: 'n1'}
+                  {name: 'n1'}
                 ]}}}
               ],
               m: 'not empty existingUsers on two hosts',


[20/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
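
These hunks apply the same rename inside the HDP stack hooks: hdp_select becomes stack_select, format_hdp_stack_version becomes format_stack_version, the hdp_stack_version variable becomes stack_version_formatted, and the Script guards drop their hdp_ prefix (is_stack_greater_or_equal, is_stack_less_than). Condensed into one sketch, the resulting params idiom looks like this (config values come from the command JSON at run time; this is a sketch of the pattern, not a complete params.py):

    from resource_management.libraries.functions import stack_select
    from resource_management.libraries.functions.version import (
        format_stack_version, compare_versions)
    from resource_management.libraries.script.script import Script

    config = Script.get_config()

    stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
    stack_version_formatted = format_stack_version(stack_version_unformatted)

    # Pre-2.2 layouts keep Hadoop under /usr; 2.2+ resolves through stack_select.
    hadoop_home = '/usr'
    if Script.is_stack_greater_or_equal("2.2"):
        hadoop_home = stack_select.get_hadoop_dir("home")

    # Guard version-specific behavior on the formatted version string.
    if stack_version_formatted != "" and \
       compare_versions(stack_version_formatted, '2.2') >= 0:
        mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"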
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index aef9357..7e37873 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -27,10 +27,10 @@ from resource_management.libraries.script import Script
 from resource_management.libraries.functions import default
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.version import compare_versions
 from ambari_commons.os_check import OSCheck
 from ambari_commons.constants import AMBARI_SUDO_BINARY
@@ -52,13 +52,13 @@ sudo = AMBARI_SUDO_BINARY
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 restart_type = default("/commandParams/restart_type", "")
 version = default("/commandParams/version", None)
 # Handle upgrade and downgrade
 if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
-  hdp_stack_version = format_hdp_stack_version(version)
+  stack_version_formatted = format_stack_version(version)
 
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
@@ -97,8 +97,8 @@ mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 # upgrades would cause these directories to have a version instead of "current"
 # which would cause a lot of problems when writing out hadoop-env.sh; instead
 # force the use of "current" in the hook
-hadoop_home = hdp_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
-hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
+hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
 
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 hadoop_secure_dn_user = hdfs_user
@@ -109,7 +109,7 @@ datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode
 is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
 
 # HDP 2.2+ params
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
 
   # not supported in HDP 2.2+

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
index d219c2c..f5556fb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py
@@ -69,7 +69,7 @@ def setup_users():
     if params.has_namenode:
       if should_create_users_and_groups:
         create_dfs_cluster_admins()
-    if params.has_tez and params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.3') >= 0:
+    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
       if should_create_users_and_groups:
         create_tez_am_view_acls()
   else:
@@ -146,7 +146,7 @@ def setup_hadoop_env():
     Directory(params.hadoop_dir, mode=0755)
 
     # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
-    if Script.is_hdp_stack_less_than("2.2"):
+    if Script.is_stack_less_than("2.2"):
       Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
         group=params.user_group )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
index 226cb0f..f49cb58 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
@@ -19,7 +19,7 @@ limitations under the License.
 
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 from ambari_commons.str_utils import cbool, cint
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
 from resource_management.core.system import System
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import default, format
@@ -32,7 +32,7 @@ stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
 agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 #users and groups
 hbase_user = config['configurations']['hbase-env']['hbase_user']

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
index 274f29f..07faae4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -27,7 +27,7 @@ def install_packages():
     return
 
   packages = ['unzip', 'curl']
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
     packages.append('hdp-select')
   Package(packages,
           retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
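
The agent_stack_retry_* keys being added to the 2.3 test configs at the top of this batch are consumed right here: before-INSTALL's params.py coerces them with cbool/cint and install_packages forwards them to the Package resource. A hedged sketch of that flow (the inline dict stands in for the real command JSON):

    from ambari_commons.str_utils import cbool, cint

    config = {"hostLevelParams": {"agent_stack_retry_on_unavailability": "false",
                                  "agent_stack_retry_count": "5"}}

    # Coerce the string-typed hostLevelParams into real bool/int values.
    agent_stack_retry_on_unavailability = cbool(
        config["hostLevelParams"]["agent_stack_retry_on_unavailability"])   # False
    agent_stack_retry_count = cint(
        config["hostLevelParams"]["agent_stack_retry_count"])               # 5

    # install_packages() then passes these through, per the hunk above:
    #   Package(packages,
    #           retry_on_repo_unavailability=agent_stack_retry_on_unavailability,
    #           ...)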

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index 069d1ae..f6f56df 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -20,11 +20,11 @@ limitations under the License.
 import os
 
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import default
 from resource_management.libraries.functions import format_jvm_option
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
 from ambari_commons.os_check import OSCheck
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import get_kinit_path
@@ -35,7 +35,7 @@ config = Script.get_config()
 host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 dfs_type = default("/commandParams/dfs_type", "")
 hadoop_conf_dir = "/etc/hadoop/conf"
@@ -45,16 +45,16 @@ component_list = default("/localComponents", [])
 # hadoop default params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 
-hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
-hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
-hadoop_bin = hdp_select.get_hadoop_dir("sbin")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
 hadoop_home = '/usr'
 create_lib_snappy_symlinks = True
 
 # HDP 2.2+ params
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-  hadoop_home = hdp_select.get_hadoop_dir("home")
+  hadoop_home = stack_select.get_hadoop_dir("home")
   create_lib_snappy_symlinks = False
   
 current_service = config['serviceName']
@@ -227,8 +227,8 @@ has_core_site = 'core-site' in config['configurations']
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 kinit_path_local = get_kinit_path()
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
index f2819aa..713c3b4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/params.py
@@ -17,14 +17,14 @@ limitations under the License.
 
 """
 
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
 from resource_management import *
 import os
 import itertools
 import re
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 
 config = Script.get_config()
 
@@ -38,8 +38,8 @@ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 kinit_path_local = functions.get_kinit_path()
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 
 smoke_user =  config['configurations']['cluster-env']['smokeuser']
 smoke_hdfs_user_dir = format("/user/{smoke_user}")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/TestVersion.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestVersion.py b/ambari-server/src/test/python/TestVersion.py
index 3a98a91..0392908 100644
--- a/ambari-server/src/test/python/TestVersion.py
+++ b/ambari-server/src/test/python/TestVersion.py
@@ -39,10 +39,10 @@ class TestVersion(TestCase):
          ("2.2.1.3", "2.2.1.3")]
     
     for input, expected in l:
-      actual = self.version_module.format_hdp_stack_version(input)
+      actual = self.version_module.format_stack_version(input)
       self.assertEqual(expected, actual)
 
-    gluster_fs_actual = self.version_module.format_hdp_stack_version("GlusterFS")
+    gluster_fs_actual = self.version_module.format_stack_version("GlusterFS")
     self.assertEqual("", gluster_fs_actual)
 
   def test_comparison(self):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
index 478c052..80ba480 100644
--- a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
+++ b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
@@ -64,7 +64,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.list_ambari_managed_repos.list_ambari_managed_repos")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
   @patch("resource_management.libraries.script.Script.put_structured_out")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_normal_flow_rhel(self,
@@ -124,7 +124,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.list_ambari_managed_repos.list_ambari_managed_repos")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
   @patch("resource_management.libraries.script.Script.put_structured_out")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_normal_flow_sles(self, write_actual_version_to_history_file_mock,
@@ -185,7 +185,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("ambari_commons.os_check.OSCheck.is_redhat_family")
   @patch("resource_management.libraries.script.Script.put_structured_out")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_exclude_existing_repo(self,  write_actual_version_to_history_file_mock,
@@ -319,7 +319,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.core.resources.packaging.Package")
   @patch("resource_management.libraries.script.Script.put_structured_out")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_format_package_name(self,                                                                                    write_actual_version_to_history_file_mock,
@@ -381,7 +381,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.list_ambari_managed_repos.list_ambari_managed_repos")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
   @patch("resource_management.libraries.script.Script.put_structured_out")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_version_reporting__build_number_defined(self,
@@ -460,7 +460,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.list_ambari_managed_repos.list_ambari_managed_repos")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
   @patch("resource_management.libraries.script.Script.put_structured_out")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   @patch("os.path.exists")
@@ -516,7 +516,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.list_ambari_managed_repos.list_ambari_managed_repos")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
   @patch("resource_management.libraries.script.Script.put_structured_out")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   @patch("os.path.exists")
@@ -608,7 +608,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.list_ambari_managed_repos.list_ambari_managed_repos")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
   @patch("resource_management.libraries.script.Script.put_structured_out")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_version_reporting__build_number_not_defined__usr_hdp_present(self,
@@ -686,7 +686,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.list_ambari_managed_repos.list_ambari_managed_repos")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
   @patch("resource_management.libraries.script.Script.put_structured_out")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_version_reporting__wrong_build_number_specified__usr_hdp_present(self,
@@ -764,7 +764,7 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.list_ambari_managed_repos.list_ambari_managed_repos")
   @patch("resource_management.libraries.functions.packages_analyzer.allInstalledPackages")
   @patch("resource_management.libraries.script.Script.put_structured_out")
-  @patch("resource_management.libraries.functions.hdp_select.get_hdp_versions")
+  @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   @patch("os.path.exists")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
index 50bf712..e08e184 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_collector.py
@@ -33,7 +33,7 @@ class TestMetricsCollector(RMFTestCase):
                        classname = "AmsCollector",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.maxDiff=None
@@ -88,7 +88,7 @@ class TestMetricsCollector(RMFTestCase):
                        classname = "AmsCollector",
                        command = "start",
                        config_file="default_ams_embedded.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.maxDiff=None

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py
index 294c15d..eab50d4 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/AMBARI_METRICS/test_metrics_grafana.py
@@ -44,7 +44,7 @@ class TestMetricsGrafana(RMFTestCase):
                        classname = "AmsGrafana",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
                        )
     self.maxDiff=None

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
index 3e7c595..5ee1fd9 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py
@@ -33,7 +33,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_default()
@@ -52,7 +52,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_default()
@@ -86,7 +86,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "start",
                        config_file="flume_only.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_default(check_mc=False)
@@ -118,7 +118,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertTrue(glob_mock.called)
@@ -140,7 +140,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     except:
       # expected since ComponentIsNotRunning gets raised
@@ -188,7 +188,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     except:
       # expected since ComponentIsNotRunning gets raised
@@ -213,7 +213,7 @@ class TestFlumeHandler(RMFTestCase):
        classname = "FlumeHandler",
        command = "status",
        config_file="default.json",
-       hdp_stack_version = self.STACK_VERSION,
+       stack_version = self.STACK_VERSION,
        target = RMFTestCase.TARGET_COMMON_SERVICES)
     except:
       # expected since ComponentIsNotRunning gets raised
@@ -344,7 +344,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "start",
                        config_file="flume_target.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_many()
@@ -374,7 +374,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "start",
                        config_file="flume_target.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_many()
@@ -403,7 +403,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "stop",
                        config_file="flume_target.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertTrue(glob_mock.called)
@@ -421,7 +421,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('File', '/etc/flume/conf/x1/ambari-meta.json',
         action = ['delete'],
@@ -435,7 +435,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory', '/var/run/flume',)
@@ -486,7 +486,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "configure",
                        config_file="flume_22.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory', '/var/run/flume',)
@@ -538,7 +538,7 @@ class TestFlumeHandler(RMFTestCase):
                        classname = "FlumeHandler",
                        command = "pre_upgrade_restart",
                        config_file="flume_22.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'flume-server', '2.2.1.0-2067'), sudo=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py
index c862880..152d00c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/FLUME/test_service_check.py
@@ -30,7 +30,7 @@ class TestFlumeCheck(RMFTestCase):
                        classname="FlumeServiceCheck",
                        command="service_check",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
index fd27cde..47ee2f5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_monitor.py
@@ -32,7 +32,7 @@ class TestGangliaMonitor(RMFTestCase):
                        classname="GangliaMonitor",
                        command="configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -45,7 +45,7 @@ class TestGangliaMonitor(RMFTestCase):
                        classname="GangliaMonitor",
                        command="configure",
                        config_file="default.non_gmetad_host.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -57,7 +57,7 @@ class TestGangliaMonitor(RMFTestCase):
                        classname="GangliaMonitor",
                        command="start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -73,7 +73,7 @@ class TestGangliaMonitor(RMFTestCase):
                        classname="GangliaMonitor",
                        command="stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmond stop >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1',
@@ -87,7 +87,7 @@ class TestGangliaMonitor(RMFTestCase):
                        classname="GangliaMonitor",
                        command="install",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
index 53415b1..9df0adb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/GANGLIA/test_ganglia_server.py
@@ -33,7 +33,7 @@ class TestGangliaServer(RMFTestCase):
                      classname="GangliaServer",
                      command="configure",
                      config_file="default.json",
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -44,7 +44,7 @@ class TestGangliaServer(RMFTestCase):
                        classname="GangliaServer",
                        command="start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -60,7 +60,7 @@ class TestGangliaServer(RMFTestCase):
                        classname="GangliaServer",
                        command="stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E service hdp-gmetad stop >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1',
@@ -75,7 +75,7 @@ class TestGangliaServer(RMFTestCase):
                        classname="GangliaServer",
                        command="install",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
index 6dc4b0b..cbc9066 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
@@ -34,7 +34,7 @@ class TestHBaseClient(RMFTestCase):
                    classname = "HbaseClient",
                    command = "configure",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -131,7 +131,7 @@ class TestHBaseClient(RMFTestCase):
                    classname = "HbaseClient",
                    command = "configure",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/etc/hbase',
@@ -228,7 +228,7 @@ class TestHBaseClient(RMFTestCase):
                    classname = "HbaseClient",
                    command = "restart",
                    config_file="client-upgrade.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES,
                    mocks_dict = mocks_dict)
 
@@ -253,7 +253,7 @@ class TestHBaseClient(RMFTestCase):
                        classname = "HbaseClient",
                        command = "restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index 719ae3e..3045efa 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -33,7 +33,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "install",
                        config_file="hbase_no_phx.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        try_install=True,
                        checked_call_mocks = [(0, "OK.", ""),(0, "OK.", "")],
@@ -54,7 +54,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "install",
                        config_file="hbase_with_phx.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        try_install=True,
                        checked_call_mocks = [(0, "OK.", ""),(0, "OK.", "")],
@@ -73,7 +73,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "configure",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -85,7 +85,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "start",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -101,7 +101,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "stop",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -122,7 +122,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "decommission",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -153,7 +153,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "decommission",
                        config_file="default.hbasedecom.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -172,7 +172,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "configure",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -184,7 +184,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "start",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -200,7 +200,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "stop",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -221,7 +221,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "decommission",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -517,7 +517,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "start",
                    config_file="hbase-2.2.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES)
     
     self.assertResourceCalled('Directory', '/etc/hbase',
@@ -701,7 +701,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "security_status",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -723,7 +723,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "security_status",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -740,7 +740,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "security_status",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -758,7 +758,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "security_status",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -768,7 +768,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMaster",
                    command = "security_status",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -778,7 +778,7 @@ class TestHBaseMaster(RMFTestCase):
                    classname = "HbaseMasterUpgrade",
                    command = "take_snapshot",
                    config_file="hbase-preupgrade.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Execute', " echo 'snapshot_all' | /usr/hdp/current/hbase-client/bin/hbase shell",
@@ -801,7 +801,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        mocks_dict = mocks_dict)
     self.assertResourceCalled('Execute',
@@ -824,7 +824,7 @@ class TestHBaseMaster(RMFTestCase):
                        classname = "HbaseMaster",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, ''), (0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 9c846c8..8d187ec 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -33,7 +33,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "configure",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -45,7 +45,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "start",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -61,7 +61,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "stop",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -82,7 +82,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "configure",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -94,7 +94,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "start",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -110,7 +110,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "stop",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -327,7 +327,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "start",
                    config_file="hbase-rs-2.2.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES)
     
     self.assertResourceCalled('Directory', '/etc/hbase',
@@ -441,7 +441,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "start",
                    config_file="hbase-rs-2.2-phoenix.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory', '/etc/hbase',
@@ -586,7 +586,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "security_status",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -608,7 +608,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "security_status",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -625,7 +625,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "security_status",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -644,7 +644,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "security_status",
                    config_file="secured.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -654,7 +654,7 @@ class TestHbaseRegionServer(RMFTestCase):
                    classname = "HbaseRegionServer",
                    command = "security_status",
                    config_file="default.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -669,7 +669,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        classname = "HbaseRegionServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-regionserver', version), sudo=True,)
@@ -687,7 +687,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        classname = "HbaseRegionServer",
                        command = "post_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        call_mocks = [(0, "Dummy output c6401.ambari.apache.org:")],
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        mocks_dict = mocks_dict)
@@ -709,7 +709,7 @@ class TestHbaseRegionServer(RMFTestCase):
                        classname = "HbaseRegionServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None), (0, None), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
index a5a360a..69a6665 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
@@ -33,7 +33,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="HbaseServiceCheck",
                         command="service_check",
                         config_file="default.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/hbaseSmokeVerify.sh',
@@ -58,7 +58,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="HbaseServiceCheck",
                         command="service_check",
                         config_file="secured.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/hbaseSmokeVerify.sh',
@@ -91,7 +91,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="HbaseServiceCheck",
                         command="service_check",
                         config_file="hbase-check-2.2.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/hbaseSmokeVerify.sh',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
index 6c3ad89..9f52b57 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_phoenix_queryserver.py
@@ -38,7 +38,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       classname = "PhoenixQueryServer",
       command = "configure",
       config_file = "hbase_default.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, None)]
     )
@@ -52,7 +52,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       classname = "PhoenixQueryServer",
       command = "start",
       config_file = "hbase_default.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, None)]
     )
@@ -71,7 +71,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       classname = "PhoenixQueryServer",
       command = "stop",
       config_file = "hbase_default.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, None)]
     )
@@ -98,7 +98,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       classname = "PhoenixQueryServer",
       command = "configure",
       config_file = "hbase_secure.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, None)]
     )
@@ -112,7 +112,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       classname = "PhoenixQueryServer",
       command = "start",
       config_file = "hbase_secure.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, None)]
     )
@@ -131,7 +131,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       classname = "PhoenixQueryServer",
       command = "stop",
       config_file = "hbase_secure.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, None)]
     )
@@ -164,7 +164,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       classname = "PhoenixQueryServer",
       command = "start",
       config_file = "hbase-rs-2.4.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory', '/etc/hbase',
@@ -470,7 +470,7 @@ class TestPhoenixQueryServer(RMFTestCase):
       command = "pre_upgrade_restart",
       config_dict = json_content,
       call_mocks = [(0, "/etc/hbase/2.3.0.0-1234/0", ''), (0, None, None), (0, None, None)],
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Directory', '/etc/hbase/2.3.0.0-1234/0',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
index 113fb7f..f939eed 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_datanode.py
@@ -35,7 +35,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "configure",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -46,7 +46,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "start",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -79,7 +79,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "stop",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode'",
@@ -95,7 +95,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "configure",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -106,7 +106,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "start",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -144,7 +144,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "start",
                        config_dict = secured_json,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured("2.2", snappy_enabled=False)
@@ -185,7 +185,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "start",
                        config_dict = secured_json,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured("2.2", snappy_enabled=False)
@@ -218,7 +218,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "stop",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop datanode',
@@ -241,7 +241,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "stop",
                        config_dict = secured_json,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode',
@@ -266,7 +266,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "stop",
                        config_dict = secured_json,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/hdp/current/hadoop-client/sbin/hadoop-daemon.sh --config /usr/hdp/current/hadoop-client/conf stop datanode'",
@@ -437,7 +437,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-datanode', version), sudo=True,)
@@ -457,7 +457,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)
@@ -505,7 +505,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "post_upgrade_restart",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, shell_call_output)],
                        mocks_dict = mocks_dict
@@ -525,7 +525,7 @@ class TestDatanode(RMFTestCase):
                          classname = "DataNode",
                          command = "post_upgrade_restart",
                          config_file = "default.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = [(0, 'There are no DataNodes here!')],
                          mocks_dict = mocks_dict
@@ -546,7 +546,7 @@ class TestDatanode(RMFTestCase):
                          classname = "DataNode",
                          command = "post_upgrade_restart",
                          config_file = "default.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = [(0, 'some')],
                          mocks_dict = mocks_dict
@@ -574,7 +574,7 @@ class TestDatanode(RMFTestCase):
         classname = "DataNode",
         command = "stop",
         config_dict = json_content,
-        hdp_stack_version = self.STACK_VERSION,
+        stack_version = self.STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES,
         call_mocks = call_mock_side_effects,
         command_args=["rolling"])
@@ -604,7 +604,7 @@ class TestDatanode(RMFTestCase):
                          classname = "DataNode",
                          command = "stop",
                          config_dict = json_content,
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = call_mock_side_effects,
                          command_args=["rolling"])
@@ -649,7 +649,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -669,7 +669,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -685,7 +685,7 @@ class TestDatanode(RMFTestCase):
                          classname = "DataNode",
                          command = "security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -704,7 +704,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -723,7 +723,7 @@ class TestDatanode(RMFTestCase):
                        classname = "DataNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
index 1e3057f..b5b43d6 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
@@ -40,7 +40,7 @@ class Test(RMFTestCase):
                        classname = "HdfsClient",
                        command = "generate_configs",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/tmp',
@@ -79,7 +79,7 @@ class Test(RMFTestCase):
                    classname = "HdfsClient",
                    command = "restart",
                    config_file="client-upgrade.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
@@ -114,7 +114,7 @@ class Test(RMFTestCase):
                        classname = "HdfsClient",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -136,7 +136,7 @@ class Test(RMFTestCase):
                          classname = "HdfsClient",
                          command = "security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -149,7 +149,7 @@ class Test(RMFTestCase):
                        classname = "HdfsClient",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -170,7 +170,7 @@ class Test(RMFTestCase):
                        classname = "HdfsClient",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -180,7 +180,7 @@ class Test(RMFTestCase):
                        classname = "HdfsClient",
                        command = "security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -199,7 +199,7 @@ class Test(RMFTestCase):
                        classname = "HdfsClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)
@@ -226,7 +226,7 @@ class Test(RMFTestCase):
                        classname = "HdfsClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True,)
     self.assertNoMoreResources()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
index 417921a..4d91897 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_journalnode.py
@@ -33,7 +33,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "configure",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -44,7 +44,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "start",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -76,7 +76,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "stop",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode'",
@@ -91,7 +91,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "configure",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -102,7 +102,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "start",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -134,7 +134,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "stop",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', "ambari-sudo.sh su hdfs -l -s /bin/bash -c '[RMF_EXPORT_PLACEHOLDER]ulimit -c unlimited ;  /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf stop journalnode'",
@@ -286,7 +286,7 @@ class TestJournalnode(RMFTestCase):
            classname = "JournalNode", command = "post_upgrade_restart",
            config_file = "journalnode-upgrade.json",
            checked_call_mocks = [(0, str(namenode_status_active)), (0, str(namenode_status_standby))],
-           hdp_stack_version = self.UPGRADE_STACK_VERSION,
+           stack_version = self.UPGRADE_STACK_VERSION,
            target = RMFTestCase.TARGET_COMMON_SERVICES )
 
     # ensure that the mock was called with the http-style version of the URL
@@ -305,7 +305,7 @@ class TestJournalnode(RMFTestCase):
            classname = "JournalNode", command = "post_upgrade_restart",
            config_file = "journalnode-upgrade-hdfs-secure.json",
            checked_call_mocks = [(0, str(namenode_status_active)), (0, str(namenode_status_standby))],
-           hdp_stack_version = self.UPGRADE_STACK_VERSION,
+           stack_version = self.UPGRADE_STACK_VERSION,
            target = RMFTestCase.TARGET_COMMON_SERVICES )
 
     # ensure that the mock was called with the http-style version of the URL
@@ -327,7 +327,7 @@ class TestJournalnode(RMFTestCase):
       self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/journalnode.py",
         classname = "JournalNode", command = "post_upgrade_restart",
         config_file = "journalnode-upgrade.json",
-        hdp_stack_version = self.UPGRADE_STACK_VERSION,
+        stack_version = self.UPGRADE_STACK_VERSION,
         target = RMFTestCase.TARGET_COMMON_SERVICES )
 
       self.fail("Expected a failure since the JMX JSON for JournalTransactionInfo was missing")
@@ -365,7 +365,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -385,7 +385,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -401,7 +401,7 @@ class TestJournalnode(RMFTestCase):
                          classname = "JournalNode",
                          command = "security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -422,7 +422,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -442,7 +442,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -458,7 +458,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-hdfs-journalnode', version), sudo=True,)
     self.assertNoMoreResources()
@@ -476,7 +476,7 @@ class TestJournalnode(RMFTestCase):
                        classname = "JournalNode",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)


[21/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py
index 86b4010..1220090 100644
--- a/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py
+++ b/ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/spark_thrift_server.py
@@ -23,8 +23,8 @@ import os
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.core.logger import Logger
@@ -70,10 +70,10 @@ class SparkThriftServer(Script):
     import params
 
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.2.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.3.2.0') >= 0:
       Logger.info("Executing Spark Thrift Server Stack Upgrade pre-restart")
       conf_select.select(params.stack_name, "spark", params.version)
-      hdp_select.select("spark-thriftserver", params.version)
+      stack_select.select("spark-thriftserver", params.version)
 
 if __name__ == "__main__":
   SparkThriftServer().execute()
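
The library side of the same cleanup: hdp_select becomes stack_select and format_hdp_stack_version becomes format_stack_version, with call signatures unchanged. A condensed sketch of the guard above, assuming only the resource_management modules this diff imports (the free-standing function name is illustrative; in the real script this logic lives in SparkThriftServer.pre_upgrade_restart):

from resource_management.libraries.functions import conf_select, stack_select
from resource_management.libraries.functions.version import compare_versions, format_stack_version

def pre_upgrade_restart_sketch(stack_name, version):
  # Normalize a build-qualified version such as "2.3.2.0-2950" before the
  # comparison, then re-point conf and binary symlinks via the renamed module.
  if version and compare_versions(format_stack_version(version), '2.3.2.0') >= 0:
    conf_select.select(stack_name, "spark", version)
    stack_select.select("spark-thriftserver", version)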

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
index 0006078..6218fe0 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_linux.py
@@ -17,7 +17,7 @@ limitations under the License.
 
 """
 
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_kinit_path import get_kinit_path
 from resource_management.libraries.script import Script
@@ -40,7 +40,7 @@ ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
 stack_name = default("/hostLevelParams/stack_name", None)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
@@ -55,7 +55,7 @@ sqoop_bin_dir = "/usr/bin"
 zoo_conf_dir = "/etc/zookeeper"
 
 # HDP 2.2+ params
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   sqoop_conf_dir = '/usr/hdp/current/sqoop-client/conf'
   sqoop_lib = '/usr/hdp/current/sqoop-client/lib'
   hadoop_home = '/usr/hdp/current/hbase-client'
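
The params modules follow the same pattern: stack_version_unformatted stays
raw, format_stack_version() turns it into the comparable
stack_version_formatted, and Script.is_stack_greater_or_equal("2.2") gates the
/usr/hdp/current paths. A minimal sketch of how a consuming script reads the
renamed values (illustrative only, assuming the params module from the hunk
above):

import params
from resource_management.libraries.functions.version import compare_versions

# stack_version_formatted can be empty on old stacks, so guard before
# comparing -- the same gate the scripts elsewhere in this commit use.
if params.stack_version_formatted and compare_versions(params.stack_version_formatted, '2.2.0.0') >= 0:
  conf_dir = params.sqoop_conf_dir  # /usr/hdp/current/sqoop-client/conf on 2.2+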

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py
index fa4d07d..06ffae4 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/sqoop_client.py
@@ -22,9 +22,9 @@ from resource_management.core.exceptions import ClientComponentHasNoStatus
 from resource_management.core.resources.system import Execute
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from sqoop import sqoop
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
@@ -51,9 +51,9 @@ class SqoopClientDefault(SqoopClient):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "sqoop", params.version)
-      hdp_select.select("sqoop-client", params.version)
+      stack_select.select("sqoop-client", params.version)
 
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/drpc_server.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/drpc_server.py
index 7dfa0b9..950a93e 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/drpc_server.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/drpc_server.py
@@ -22,10 +22,10 @@ import sys
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from storm import storm
 from service import service
 from service_check import ServiceCheck
@@ -52,9 +52,9 @@ class DrpcServer(Script):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      hdp_select.select("storm-client", params.version)
+      stack_select.select("storm-client", params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus.py
index 5bad428..df2db1c 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus.py
@@ -23,9 +23,9 @@ from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from storm import storm
 from service import service
 from resource_management.libraries.functions.security_commons import build_expectations, \
@@ -56,10 +56,10 @@ class NimbusDefault(Nimbus):
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      hdp_select.select("storm-client", params.version)
-      hdp_select.select("storm-nimbus", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-nimbus", params.version)
 
 
   def start(self, env, upgrade_type=None):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus_prod.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus_prod.py
index 75125dd..1197650 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus_prod.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/nimbus_prod.py
@@ -23,10 +23,10 @@ from resource_management.libraries.script import Script
 from storm import storm
 from supervisord_service import supervisord_service, supervisord_check_status
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 
 class Nimbus(Script):
 
@@ -47,10 +47,10 @@ class Nimbus(Script):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      hdp_select.select("storm-client", params.version)
-      hdp_select.select("storm-nimbus", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-nimbus", params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
index 33615ba..89ffcad 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_linux.py
@@ -25,12 +25,12 @@ import status_params
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_bare_principal import get_bare_principal
 from resource_management.libraries.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import get_kinit_path
 
@@ -47,8 +47,8 @@ storm_component_home_dir = status_params.storm_component_home_dir
 conf_dir = status_params.conf_dir
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-stack_is_hdp22_or_further = Script.is_hdp_stack_greater_or_equal("2.2")
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_is_hdp22_or_further = Script.is_stack_greater_or_equal("2.2")
 
 # default hadoop params
 rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
@@ -285,7 +285,7 @@ hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] if
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] if has_namenode else None
 hdfs_site = config['configurations']['hdfs-site'] if has_namenode else None
 default_fs = config['configurations']['core-site']['fs.defaultFS'] if has_namenode else None
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin") if has_namenode else None
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin") if has_namenode else None
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir() if has_namenode else None
 kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py
index 3408bfb..90bc76d 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py
@@ -25,7 +25,7 @@ from resource_management.libraries.functions.default import default
 # server configurations
 config = Script.get_config()
 
-stack_is_hdp23_or_further = Script.is_hdp_stack_greater_or_equal("2.3")
+stack_is_hdp23_or_further = Script.is_stack_greater_or_equal("2.3")
 
 hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
 conf_dir = os.environ["STORM_CONF_DIR"]

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/rest_api.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/rest_api.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/rest_api.py
index eb81301..07065c2 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/rest_api.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/rest_api.py
@@ -22,10 +22,10 @@ import sys
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 
 from storm import storm
 from service import service

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py
index 49dee47..984a4ba 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py
@@ -66,7 +66,7 @@ else:
 
   storm_component_home_dir = "/usr/lib/storm"
   conf_dir = "/etc/storm/conf"
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
+  if Script.is_stack_greater_or_equal("2.2"):
     storm_component_home_dir = format("/usr/hdp/current/{component_directory}")
     conf_dir = format("/usr/hdp/current/{component_directory}/conf")
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
index 43736fa..a15b500 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/storm.py
@@ -131,7 +131,7 @@ def storm(name=None):
     TemplateConfig(format("{conf_dir}/storm_jaas.conf"),
                    owner=params.storm_user
     )
-    if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+    if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
       TemplateConfig(format("{conf_dir}/client_jaas.conf"),
                      owner=params.storm_user
       )

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor.py
index 33f06b1..99279be 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor.py
@@ -22,10 +22,10 @@ import sys
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from storm import storm
 from service import service
 from ambari_commons import OSConst
@@ -74,10 +74,10 @@ class SupervisorDefault(Supervisor):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      hdp_select.select("storm-client", params.version)
-      hdp_select.select("storm-supervisor", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-supervisor", params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor_prod.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor_prod.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor_prod.py
index 3efb9f4..48f65ba 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor_prod.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/supervisor_prod.py
@@ -24,10 +24,10 @@ from service import service
 from supervisord_service import supervisord_service, supervisord_check_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 
 
 class Supervisor(Script):
@@ -48,10 +48,10 @@ class Supervisor(Script):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      hdp_select.select("storm-client", params.version)
-      hdp_select.select("storm-supervisor", params.version)
+      stack_select.select("storm-client", params.version)
+      stack_select.select("storm-supervisor", params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/ui_server.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/ui_server.py
index 3053b05..6c2a5b7 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/ui_server.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/ui_server.py
@@ -25,11 +25,11 @@ from service_check import ServiceCheck
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.core.resources.system import Link
 from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_JAAS_CONF
@@ -79,9 +79,9 @@ class UiServerDefault(UiServer):
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "storm", params.version)
-      hdp_select.select("storm-client", params.version)
+      stack_select.select("storm-client", params.version)
 
   def link_metrics_sink_jar(self):
     # Add storm metrics reporter JAR to storm-ui-server classpath.

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
index da52556..cc87973 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py
@@ -21,8 +21,8 @@ import os
 
 from resource_management.libraries.resources import HdfsResource
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
@@ -35,25 +35,25 @@ stack_name = default("/hostLevelParams/stack_name", None)
 
 # This is expected to be of the form #.#.#.#
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 # New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
 version = default("/commandParams/version", None)
 
 # default hadoop parameters
 hadoop_home = '/usr'
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 tez_etc_dir = "/etc/tez"
 config_dir = "/etc/tez/conf"
 tez_examples_jar = "/usr/lib/tez/tez-mapreduce-examples*.jar"
 
 # hadoop parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   tez_examples_jar = "/usr/hdp/current/tez-client/tez-examples*.jar"
 
 # tez only started linking /usr/hdp/x.x.x.x/tez-client/conf in HDP 2.3+
-if Script.is_hdp_stack_greater_or_equal("2.3"):
+if Script.is_stack_greater_or_equal("2.3"):
   # !!! use realpath for now since the symlink exists but is broken and a
   # broken symlink messes with the DirectoryProvider class
   config_dir = os.path.realpath("/usr/hdp/current/tez-client/conf")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py
index 636d092..ad80830 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py
@@ -19,7 +19,7 @@ limitations under the License.
 
 import os
 
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
 from resource_management.libraries.script.script import Script
 
 config = Script.get_config()
@@ -34,7 +34,7 @@ try:
 except KeyError:
   hadoop_classpath_prefix_template = ""
 
-hdp_stack_version = ""
+stack_version_formatted = ""
 
 hdp_root = None
 try:
@@ -43,11 +43,11 @@ except:
   pass
 
 def refresh_tez_state_dependent_params():
-  global tez_home_dir, tez_conf_dir, hdp_stack_version
+  global tez_home_dir, tez_conf_dir, stack_version_formatted
   tez_home_dir = os.environ["TEZ_HOME"]
   tez_conf_dir = os.path.join(tez_home_dir, "conf")
   # this is not available on INSTALL action because hdp-select is not available
-  hdp_stack_version = get_hdp_version("tez")
+  stack_version_formatted = get_stack_version("tez")
 
 
 if os.environ.has_key("TEZ_HOME"):
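
On Windows the lookup is deliberately lazy: per the comment in the hunk above,
get_stack_version relies on the select tool (hdp-select here), which is not
present yet during the INSTALL action. A minimal sketch of that deferred
pattern (illustrative; refresh_stack_version is a placeholder name):

from resource_management.libraries.functions.get_stack_version import get_stack_version

stack_version_formatted = ""  # unknown until the component is installed

def refresh_stack_version():
  # Safe to call once the package (and its select tool) is on the box.
  global stack_version_formatted
  stack_version_formatted = get_stack_version("tez")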

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
index a38d917..1faedf9 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py
@@ -38,8 +38,8 @@ class TezPreUpgrade(Script):
 
     Logger.info("Before starting Stack Upgrade, check if tez tarball has been copied to HDFS.")
 
-    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.2.0.0') >= 0:
-      Logger.info("Stack version {0} is sufficient to check if need to copy tez.tar.gz to HDFS.".format(params.hdp_stack_version))
+    if params.stack_version_formatted and compare_versions(params.stack_version_formatted, '2.2.0.0') >= 0:
+      Logger.info("Stack version {0} is sufficient for checking whether tez.tar.gz needs to be copied to HDFS.".format(params.stack_version_formatted))
 
       # Force it to copy the current version of the tez tarball, rather than the version the RU will go to.
       resource_created = copy_to_hdfs(

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
index 1747cf3..b70256c 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
@@ -67,7 +67,7 @@ class TezServiceCheckLinux(TezServiceCheck):
       source = format("{tmp_dir}/sample-tez-test"),
     )
 
-    if params.hdp_stack_version and compare_versions(params.hdp_stack_version, '2.2.0.0') >= 0:
+    if params.stack_version_formatted and compare_versions(params.stack_version_formatted, '2.2.0.0') >= 0:
       copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
 
     params.HdfsResource(None, action = "execute")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
index 8bdabf1..e770d9b 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/tez_client.py
@@ -29,9 +29,9 @@ from ambari_commons.os_utils import copy_file, extract_path_component
 from resource_management.core.exceptions import ClientComponentHasNoStatus
 from resource_management.core.source import InlineTemplate
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.script.script import Script
 
 from tez import tez
@@ -55,10 +55,10 @@ class TezClientLinux(TezClient):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "tez", params.version)
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-client", params.version)
+      stack_select.select("hadoop-client", params.version)
 
   def install(self, env):
     self.install_packages(env)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
index 7644225..2966581 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/application_timeline_server.py
@@ -21,8 +21,8 @@ Ambari Agent
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties,\
   FILE_TYPE_XML
@@ -70,9 +70,9 @@ class ApplicationTimelineServerDefault(ApplicationTimelineServer):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-yarn-timelineserver", params.version)
+      stack_select.select("hadoop-yarn-timelineserver", params.version)
 
   def status(self, env):
     import status_params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
index 5d95c5c..53b0e53 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/historyserver.py
@@ -22,10 +22,10 @@ Ambari Agent
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
@@ -77,9 +77,9 @@ class HistoryServerDefault(HistoryServer):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-mapreduce-historyserver", params.version)
+      stack_select.select("hadoop-mapreduce-historyserver", params.version)
       # MC Hammer said, "Can't touch this"
       copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
       copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
@@ -91,7 +91,7 @@ class HistoryServerDefault(HistoryServer):
     env.set_params(params)
     self.configure(env) # FOR SECURITY
 
-    if params.hdp_stack_version_major and compare_versions(params.hdp_stack_version_major, '2.2.0.0') >= 0:
+    if params.stack_version_formatted_major and compare_versions(params.stack_version_formatted_major, '2.2.0.0') >= 0:
       # MC Hammer said, "Can't touch this"
       resource_created = copy_to_hdfs(
         "mapreduce",

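Beyond the select calls, the HistoryServer hunk above also re-stages framework
tarballs during pre_upgrade_restart so jobs launched on the new version find
them in HDFS. A condensed sketch of that step (illustrative, not the commit's
exact code; it ignores the resource_created return value the real script keeps):

import params
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs

for tarball in ("mapreduce", "tez"):
  copy_to_hdfs(tarball, params.user_group, params.hdfs_user,
               host_sys_prepped=params.host_sys_prepped)
params.HdfsResource(None, action="execute")  # flush the queued HDFS operations
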
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
index 7ceadf0..9fc1e32 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/mapreduce2_client.py
@@ -22,7 +22,7 @@ Ambari Agent
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from yarn import yarn
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
@@ -56,9 +56,9 @@ class MapReduce2ClientDefault(MapReduce2Client):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-client", params.version)
+      stack_select.select("hadoop-client", params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
index d508d55..fd14d0f 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/nodemanager.py
@@ -23,8 +23,8 @@ import nodemanager_upgrade
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
@@ -72,9 +72,9 @@ class NodemanagerDefault(Nodemanager):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-yarn-nodemanager", params.version)
+      stack_select.select("hadoop-yarn-nodemanager", params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing NodeManager Stack Upgrade post-restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
index 0ca632f..e02a55d 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py
@@ -23,10 +23,10 @@ import os
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries import functions
 
@@ -54,8 +54,8 @@ stack_name = default("/hostLevelParams/stack_name", None)
 
 # This is expected to be of the form #.#.#.#
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version_major = format_hdp_stack_version(stack_version_unformatted)
-hdp_stack_version = functions.get_hdp_version('hadoop-yarn-resourcemanager')
+stack_version_formatted_major = format_stack_version(stack_version_unformatted)
+stack_version_formatted = functions.get_stack_version('hadoop-yarn-resourcemanager')
 
 # New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade.
 # It cannot be used during the initial Cluster Install because the version is not yet known.
@@ -64,9 +64,9 @@ version = default("/commandParams/version", None)
 hostname = config['hostname']
 
 # hadoop default parameters
-hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
-hadoop_bin = hdp_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_yarn_home = '/usr/lib/hadoop-yarn'
 hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
@@ -76,7 +76,7 @@ yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
 hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
 
 # hadoop parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   # MapR directory root
   mapred_role_root = "hadoop-mapreduce-client"
   command_role = default("/role", "")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
index d40abff..e51ca8a 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/resourcemanager.py
@@ -21,10 +21,10 @@ Ambari Agent
 
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.check_process_status import check_process_status
 from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -105,9 +105,9 @@ class ResourcemanagerDefault(Resourcemanager):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-yarn-resourcemanager", params.version)
+      stack_select.select("hadoop-yarn-resourcemanager", params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
index cd4d558..d82b630 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/service_check.py
@@ -85,7 +85,7 @@ class ServiceCheckDefault(ServiceCheck):
     import params
     env.set_params(params)
 
-    if params.hdp_stack_version_major != "" and compare_versions(params.hdp_stack_version_major, '2.2') >= 0:
+    if params.stack_version_formatted_major != "" and compare_versions(params.stack_version_formatted_major, '2.2') >= 0:
       path_to_distributed_shell_jar = "/usr/hdp/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar"
     else:
       path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
index edfbe44..e05ed60 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn.py
@@ -253,7 +253,7 @@ def yarn(name = None):
     )
 
     # if HDP stack is greater than/equal to 2.2, mkdir for state store property (added in 2.2)
-    if (Script.is_hdp_stack_greater_or_equal("2.2")):
+    if (Script.is_stack_greater_or_equal("2.2")):
       Directory(params.ats_leveldb_state_store_dir,
        owner=params.yarn_user,
        group=params.user_group,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
index 0c6115f..d300279 100644
--- a/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
+++ b/ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/yarn_client.py
@@ -22,7 +22,7 @@ Ambari Agent
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from yarn import yarn
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
@@ -56,9 +56,9 @@ class YarnClientDefault(YarnClient):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-client", params.version)
+      stack_select.select("hadoop-client", params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
index c6264ff..9ff9125 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
@@ -22,7 +22,7 @@ import status_params
 import os
 
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
@@ -32,7 +32,7 @@ config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 stack_name = default("/hostLevelParams/stack_name", None)
 current_version = default("/hostLevelParams/current_version", None)
@@ -49,7 +49,7 @@ config_dir = "/etc/zookeeper/conf"
 zk_smoke_out = os.path.join(tmp_dir, "zkSmoke.out")
 
 # hadoop parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   zk_home = format("/usr/hdp/current/{component_directory}")
   zk_bin = format("/usr/hdp/current/{component_directory}/bin")
   zk_cli_shell = format("/usr/hdp/current/{component_directory}/bin/zkCli.sh")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
index ae2a1fd..d18e4d7 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
@@ -48,5 +48,5 @@ else:
   zk_user =  config['configurations']['zookeeper-env']['zk_user']
 
   config_dir = "/etc/zookeeper/conf"
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
+  if Script.is_stack_greater_or_equal("2.2"):
     config_dir = format("/usr/hdp/current/{component_directory}/conf")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
index 2727641..5712ce4 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
@@ -23,8 +23,8 @@ import sys
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 
@@ -36,9 +36,9 @@ def zookeeper(type = None, upgrade_type=None):
     # This path may be missing after an Ambari upgrade. We need to create it
     # before any configs are applied.
     if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version\
-      and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.current_version)
-      hdp_select.select("zookeeper-server", params.version)
+      stack_select.select("zookeeper-server", params.version)
 
   Directory(params.config_dir,
             owner=params.zk_user,

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
index 7a11fee..25ace24 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
@@ -22,8 +22,8 @@ Ambari Agent
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.format import format
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -65,9 +65,9 @@ class ZookeeperClientLinux(ZookeeperClient):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.version)
-      hdp_select.select("zookeeper-client", params.version)
+      stack_select.select("zookeeper-client", params.version)
 
 @OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
 class ZookeeperClientWindows(ZookeeperClient):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
index 842deb0..d5b6898 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
@@ -24,8 +24,8 @@ import sys
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions import get_unique_id_and_date
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_JAAS_CONF
@@ -74,9 +74,9 @@ class ZookeeperServerLinux(ZookeeperServer):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.version)
-      hdp_select.select("zookeeper-server", params.version)
+      stack_select.select("zookeeper-server", params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     if upgrade_type == "nonrolling":

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
index afa332a..6fe0772 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
@@ -24,8 +24,8 @@ from resource_management import *
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 
 @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
 def zookeeper_service(action='start', upgrade_type=None):
@@ -33,9 +33,9 @@ def zookeeper_service(action='start', upgrade_type=None):
 
   # This path may be missing after an Ambari upgrade. We need to create it.
   if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version \
-    and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
     conf_select.select(params.stack_name, "zookeeper", params.current_version)
-    hdp_select.select("zookeeper-server", params.version)
+    stack_select.select("zookeeper-server", params.version)
 
   cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
 

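For readers tracking the rename, a minimal sketch of the version-gated select pattern that repeats across the ZooKeeper scripts above (a sketch only, assuming the resource_management layout shown in this commit; the component name and version argument are illustrative):

# Sketch: mirrors the pattern in zookeeper_server.py / zookeeper_client.py above.
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.version import compare_versions, format_stack_version

def select_if_rolling_upgrade_layout(stack_name, version):
  # format_stack_version normalizes the version string so that compare_versions
  # can compare it part by part; 2.2.0.0 is the first release with the
  # select-based /usr/hdp layout.
  if version and compare_versions(format_stack_version(version), '2.2.0.0') >= 0:
    conf_select.select(stack_name, "zookeeper", version)
    stack_select.select("zookeeper-server", version)
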
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
index 6f8ef9f..08bdcc3 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/install_packages.py
@@ -34,8 +34,8 @@ from ambari_commons.os_check import OSCheck, OSConst
 from ambari_commons.str_utils import cbool, cint
 from resource_management.libraries.functions.packages_analyzer import allInstalledPackages
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions.hdp_select import get_hdp_versions
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.stack_select import get_stack_versions
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.repo_version_history \
   import read_actual_version_from_history_file, write_actual_version_to_history_file, REPO_VERSION_HISTORY_FILE
 from resource_management.core.resources.system import Execute
@@ -83,10 +83,10 @@ class InstallPackages(Script):
       stack_id = config['commandParams']['stack_id']
 
     # current stack information
-    self.current_hdp_stack_version = None
+    self.current_stack_version_formatted = None
     if 'stack_version' in config['hostLevelParams']:
       current_stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-      self.current_hdp_stack_version = format_hdp_stack_version(current_stack_version_unformatted)
+      self.current_stack_version_formatted = format_stack_version(current_stack_version_unformatted)
 
 
     stack_name = None
@@ -145,7 +145,7 @@ class InstallPackages(Script):
       raise Fail("Failed to distribute repositories/install packages")
 
     # Initial list of versions, used to compute the new version installed
-    self.old_versions = get_hdp_versions(self.stack_root_folder)
+    self.old_versions = get_stack_versions(self.stack_root_folder)
 
     try:
       is_package_install_successful = False
@@ -189,13 +189,13 @@ class InstallPackages(Script):
     if args[0] != "HDP":
       Logger.info("Unrecognized stack name {0}, cannot create config links".format(args[0]))
 
-    if compare_versions(format_hdp_stack_version(args[1]), "2.3.0.0") < 0:
+    if compare_versions(format_stack_version(args[1]), "2.3.0.0") < 0:
       Logger.info("Configuration symlinks are not needed for {0}, only HDP-2.3+".format(stack_version))
       return
 
     for package_name, directories in conf_select.PACKAGE_DIRS.iteritems():
       # if already on HDP 2.3, then we should skip making conf.backup folders
-      if self.current_hdp_stack_version and compare_versions(self.current_hdp_stack_version, '2.3') >= 0:
+      if self.current_stack_version_formatted and compare_versions(self.current_stack_version_formatted, '2.3') >= 0:
         Logger.info("The current cluster stack of {0} does not require backing up configurations; "
                     "only conf-select versioned config directories will be created.".format(stack_version))
         # only link configs for all known packages
@@ -226,7 +226,7 @@ class InstallPackages(Script):
     Logger.info("Attempting to determine actual version with build number.")
     Logger.info("Old versions: {0}".format(self.old_versions))
 
-    new_versions = get_hdp_versions(self.stack_root_folder)
+    new_versions = get_stack_versions(self.stack_root_folder)
     Logger.info("New versions: {0}".format(new_versions))
 
     deltas = set(new_versions) - set(self.old_versions)
@@ -268,7 +268,7 @@ class InstallPackages(Script):
     Logger.info("Installation of packages failed. Checking if installation was partially complete")
     Logger.info("Old versions: {0}".format(self.old_versions))
 
-    new_versions = get_hdp_versions(self.stack_root_folder)
+    new_versions = get_stack_versions(self.stack_root_folder)
     Logger.info("New versions: {0}".format(new_versions))
 
     deltas = set(new_versions) - set(self.old_versions)
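
The actual-version detection above reduces to a set difference between the stack versions found on disk before and after the package install; a toy illustration with invented version strings:

# Hypothetical before/after version lists for illustration only.
old_versions = ["2.2.0.0-2041"]
new_versions = ["2.2.0.0-2041", "2.3.0.0-1234"]
deltas = set(new_versions) - set(old_versions)
# Whatever appeared under the stack root during the install is the new version.
assert deltas == set(["2.3.0.0-1234"])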

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py b/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
index a573420..b8bf176 100644
--- a/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
+++ b/ambari-server/src/main/resources/custom_actions/scripts/ru_set_all.py
@@ -28,13 +28,13 @@ from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.core import shell
 from resource_management.core.exceptions import Fail
 from resource_management.core.logger import Logger
 from resource_management.core.resources.system import Execute, Link, Directory
 
-HDP_SELECT = '/usr/bin/hdp-select'
+STACK_SELECT = '/usr/bin/hdp-select'
 
 class UpgradeSetAll(Script):
   """
@@ -56,14 +56,14 @@ class UpgradeSetAll(Script):
       cmd = ('/usr/bin/yum', 'clean', 'all')
       code, out = shell.call(cmd, sudo=True)
 
-    min_ver = format_hdp_stack_version("2.2")
-    real_ver = format_hdp_stack_version(version)
+    min_ver = format_stack_version("2.2")
+    real_ver = format_stack_version(version)
     if stack_name == "HDP":
       if compare_versions(real_ver, min_ver) >= 0:
-        cmd = ('ambari-python-wrap', HDP_SELECT, 'set', 'all', version)
+        cmd = ('ambari-python-wrap', STACK_SELECT, 'set', 'all', version)
         code, out = shell.call(cmd, sudo=True)
 
-      if compare_versions(real_ver, format_hdp_stack_version("2.3")) >= 0:
+      if compare_versions(real_ver, format_stack_version("2.3")) >= 0:
         # backup the old and symlink /etc/[component]/conf to /usr/hdp/current/[component]
         for k, v in conf_select.PACKAGE_DIRS.iteritems():
           for dir_def in v:
@@ -97,9 +97,9 @@ class UpgradeSetAll(Script):
     Logger.info("Unlinking all configs when downgrading from HDP 2.3 to 2.2")
 
     # normalize the versions
-    stack_23 = format_hdp_stack_version("2.3")
-    downgrade_to_version = format_hdp_stack_version(downgrade_to_version)
-    downgrade_from_version = format_hdp_stack_version(downgrade_from_version)
+    stack_23 = format_stack_version("2.3")
+    downgrade_to_version = format_stack_version(downgrade_to_version)
+    downgrade_from_version = format_stack_version(downgrade_from_version)
 
     # downgrade-to-version must be 2.2 (less than 2.3)
     if compare_versions(downgrade_to_version, stack_23) >= 0:

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/scripts/Ambaripreupload.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/scripts/Ambaripreupload.py b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
index 5ce75dd..941539c 100644
--- a/ambari-server/src/main/resources/scripts/Ambaripreupload.py
+++ b/ambari-server/src/main/resources/scripts/Ambaripreupload.py
@@ -48,40 +48,40 @@ SQL_DRIVER_PATH = "/var/lib/ambari-server/resources/sqljdbc41.jar"
  
 """
 This file provides helper methods needed for the versioning of RPMs. Specifically, it does dynamic variable
-interpretation to replace strings like {{ hdp_stack_version }}  where the value of the
+interpretation to replace strings like {{ stack_version_formatted }}  where the value of the
 variables cannot be determined ahead of time, but rather, depends on what files are found.
  
-It assumes that {{ hdp_stack_version }} is constructed as ${major.minor.patch.rev}-${build_number}
+It assumes that {{ stack_version_formatted }} is constructed as ${major.minor.patch.rev}-${build_number}
 E.g., 998.2.2.1.0-998
 Please note that "-${build_number}" is optional.
 """
 
 with Environment() as env:
-  def get_hdp_version():
+  def get_stack_version():
     if not options.hdp_version:
       # Ubuntu returns: "stdin: is not a tty", as subprocess output.
       tmpfile = tempfile.NamedTemporaryFile()
       out = None
       with open(tmpfile.name, 'r+') as file:
-        get_hdp_version_cmd = '/usr/bin/hdp-select status %s > %s' % ('hadoop-mapreduce-historyserver', tmpfile.name)
-        code, stdoutdata = shell.call(get_hdp_version_cmd)
+        get_stack_version_cmd = '/usr/bin/hdp-select status %s > %s' % ('hadoop-mapreduce-historyserver', tmpfile.name)
+        code, stdoutdata = shell.call(get_stack_version_cmd)
         out = file.read()
       pass
       if code != 0 or out is None:
         Logger.warning("Could not verify HDP version by calling '%s'. Return Code: %s, Output: %s." %
-                       (get_hdp_version_cmd, str(code), str(out)))
+                       (get_stack_version_cmd, str(code), str(out)))
         return 1
      
       matches = re.findall(r"([\d\.]+\-\d+)", out)
-      hdp_version = matches[0] if matches and len(matches) > 0 else None
+      stack_version = matches[0] if matches and len(matches) > 0 else None
      
-      if not hdp_version:
+      if not stack_version:
         Logger.error("Could not parse HDP version from output of hdp-select: %s" % str(out))
         return 1
     else:
-      hdp_version = options.hdp_version
+      stack_version = options.hdp_version
       
-    return hdp_version
+    return stack_version
   
   parser = OptionParser()
   parser.add_option("-v", "--hdp-version", dest="hdp_version", default="",
@@ -97,7 +97,7 @@ with Environment() as env:
   if len(args) > 0:
     hdfs_path_prefix = args[0]
   
-  hdp_version = get_hdp_version()
+  stack_version = get_stack_version()
   
   def getPropertyValueFromConfigXMLFile(xmlfile, name, defaultValue=None):
     xmldoc = minidom.parse(xmlfile)
@@ -135,12 +135,12 @@ with Environment() as env:
     hdfs_path_prefix = hdfs_path_prefix
     hdfs_user = "hdfs"
     mapred_user ="mapred"
-    hadoop_bin_dir="/usr/hdp/" + hdp_version + "/hadoop/bin"
+    hadoop_bin_dir="/usr/hdp/" + stack_version + "/hadoop/bin"
     hadoop_conf_dir = "/etc/hadoop/conf"
     user_group = "hadoop"
     security_enabled = False
     oozie_user = "oozie"
-    execute_path = "/usr/hdp/" + hdp_version + "/hadoop/bin"
+    execute_path = "/usr/hdp/" + stack_version + "/hadoop/bin"
     ambari_libs_dir = "/var/lib/ambari-agent/lib"
     hdfs_site = ConfigDictionary({'dfs.webhdfs.enabled':False, 
     })
@@ -157,7 +157,7 @@ with Environment() as env:
   export CATALINA_TMPDIR=${{CATALINA_TMPDIR:-/var/tmp/oozie}}
   export CATALINA_PID=${{CATALINA_PID:-/var/run/oozie/oozie.pid}}
   export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
-  '''.format(hdp_version)
+  '''.format(stack_version)
     
     HdfsResource = functools.partial(
       HdfsResource,
@@ -196,17 +196,17 @@ with Environment() as env:
       )
    
    
-  def copy_tarballs_to_hdfs(source, dest, hdp_select_component_name, component_user, file_owner, group_owner):
+  def copy_tarballs_to_hdfs(source, dest, stack_select_component_name, component_user, file_owner, group_owner):
     """
     :param tarball_prefix: Prefix of the tarball must be one of tez, hive, mr, pig
-    :param hdp_select_component_name: Component name to get the status to determine the version
+    :param stack_select_component_name: Component name to get the status to determine the version
     :param component_user: User that will execute the Hadoop commands
     :param file_owner: Owner of the files copied to HDFS (typically hdfs account)
     :param group_owner: Group owner of the files copied to HDFS (typically hadoop group)
     :return: Returns 0 on success, 1 if no files were copied, and in some cases may raise an exception.
    
     In order to call this function, params.py must have all of the following,
-    hdp_stack_version, kinit_path_local, security_enabled, hdfs_user, hdfs_principal_name, hdfs_user_keytab,
+    stack_version_formatted, kinit_path_local, security_enabled, hdfs_user, hdfs_principal_name, hdfs_user_keytab,
     hadoop_bin_dir, hadoop_conf_dir, and HdfsDirectory as a partial function.
     """
    
@@ -220,7 +220,7 @@ with Environment() as env:
    
     file_name = os.path.basename(component_tar_source_file)
     destination_file = os.path.join(component_tar_destination_folder, file_name)
-    destination_file = destination_file.replace("{{ hdp_stack_version }}", hdp_version)
+    destination_file = destination_file.replace("{{ stack_version_formatted }}", stack_version)
    
   
     kinit_if_needed = ""
@@ -278,10 +278,10 @@ with Environment() as env:
   env.set_params(params)
   hadoop_conf_dir = params.hadoop_conf_dir
    
-  oozie_libext_dir = format("/usr/hdp/{hdp_version}/oozie/libext")
+  oozie_libext_dir = format("/usr/hdp/{stack_version}/oozie/libext")
   sql_driver_filename = os.path.basename(SQL_DRIVER_PATH)
-  oozie_home=format("/usr/hdp/{hdp_version}/oozie")
-  oozie_setup_sh=format("/usr/hdp/{hdp_version}/oozie/bin/oozie-setup.sh")
+  oozie_home=format("/usr/hdp/{stack_version}/oozie")
+  oozie_setup_sh=format("/usr/hdp/{stack_version}/oozie/bin/oozie-setup.sh")
   oozie_setup_sh_current="/usr/hdp/current/oozie-server/bin/oozie-setup.sh"
   oozie_tmp_dir = "/var/tmp/oozie"
   configure_cmds = []
@@ -353,7 +353,7 @@ with Environment() as env:
   ###############################################
   # PREPARE-WAR END [BEGIN]
   ###############################################
-  oozie_shared_lib = format("/usr/hdp/{hdp_version}/oozie/share")
+  oozie_shared_lib = format("/usr/hdp/{stack_version}/oozie/share")
   oozie_user = 'oozie'
   oozie_hdfs_user_dir = format("{hdfs_path_prefix}/user/{oozie_user}")
   kinit_if_needed = ''
@@ -376,12 +376,12 @@ with Environment() as env:
     )
 
   print "Copying tarballs..."
-  copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/hadoop/mapreduce.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
-  copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/tez/lib/tez.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/tez/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
-  copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/hive/hive.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/hive/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
-  copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/pig/pig.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/pig/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
-  copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/hadoop-mapreduce/hadoop-streaming.jar"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
-  copy_tarballs_to_hdfs(format("/usr/hdp/{hdp_version}/sqoop/sqoop.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ hdp_stack_version }}/sqoop/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+  copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hadoop/mapreduce.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+  copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/tez/lib/tez.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/tez/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+  copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hive/hive.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/hive/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+  copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/pig/pig.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/pig/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+  copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/hadoop-mapreduce/hadoop-streaming.jar"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/mapreduce/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
+  copy_tarballs_to_hdfs(format("/usr/hdp/{stack_version}/sqoop/sqoop.tar.gz"), hdfs_path_prefix+"/hdp/apps/{{ stack_version_formatted }}/sqoop/", 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
   print "Creating hdfs directories..."
   createHdfsResources()
   putSQLDriverToOozieShared()
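
As a quick illustration of the {{ stack_version_formatted }} interpolation performed by copy_tarballs_to_hdfs() above (the version value and path are invented for the example):

# Hypothetical values for illustration only.
stack_version = "2.3.0.0-1234"
destination_file = "/hdp/apps/{{ stack_version_formatted }}/tez/tez.tar.gz"
# The same replace() call used in copy_tarballs_to_hdfs():
destination_file = destination_file.replace("{{ stack_version_formatted }}", stack_version)
assert destination_file == "/hdp/apps/2.3.0.0-1234/tez/tez.tar.gz"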

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
index c22349f..1159491 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/params.py
@@ -30,7 +30,7 @@ tmp_dir = Script.get_tmp_dir()
 rpm_version = default("/configurations/cluster-env/rpm_version", None)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = config['hostLevelParams']['stack_version']
+stack_version_formatted = config['hostLevelParams']['stack_version']
 
 #hadoop params
 if rpm_version:
@@ -62,7 +62,7 @@ else:
   hive_tar_file = '/usr/share/HDP-webhcat/hive.tar.gz'
   sqoop_tar_file = '/usr/share/HDP-webhcat/sqoop*.tar.gz'
 
-  if str(hdp_stack_version).startswith('2.0'):
+  if str(stack_version_formatted).startswith('2.0'):
     hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
     webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
   # for newer versions
@@ -262,7 +262,7 @@ webhcat_hdfs_user_mode = 0755
 #for create_hdfs_directory
 security_param = "true" if security_enabled else "false"
 
-if str(hdp_stack_version).startswith('2.0') or str(hdp_stack_version).startswith('2.1'):
+if str(stack_version_formatted).startswith('2.0') or str(stack_version_formatted).startswith('2.1'):
   app_dir_files = {tez_local_api_jars:None}
 else:
   app_dir_files = {

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index 68fe9f9..ab7dea5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@ -21,9 +21,9 @@ from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import default
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 
 config = Script.get_config()
 
@@ -32,18 +32,18 @@ dfs_type = default("/commandParams/dfs_type", "")
 sudo = AMBARI_SUDO_BINARY
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 # current host stack version
 current_version = default("/hostLevelParams/current_version", None)
 
 # default hadoop params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 
 # HDP 2.2+ params
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
 
   # not supported in HDP 2.2+

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
index b957bd0..8cf75cb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -21,7 +21,7 @@ import os
 import ambari_simplejson as json
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.resources.xml_config import XmlConfig
@@ -37,11 +37,11 @@ def setup_hdp_symlinks():
   :return:
   """
   import params
-  if params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0:
+  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
     # try using the exact version first, falling back to just the stack if it's not defined,
     # which would only happen during an initial cluster installation
     version = params.current_version if params.current_version is not None else params.stack_version_unformatted
-    hdp_select.select_all(version)
+    stack_select.select_all(version)
 
 
 def setup_config():
@@ -87,7 +87,7 @@ def link_configs(struct_out_file):
   Links configs, only on a fresh install of HDP-2.3 and higher
   """
 
-  if not Script.is_hdp_stack_greater_or_equal("2.3"):
+  if not Script.is_stack_greater_or_equal("2.3"):
     Logger.info("Can only link configs for HDP-2.3 and higher.")
     return
 


[13/51] [abbrv] ambari git commit: AMBARI-15308. UI: ability to perform bulk add host components (Joe Wang via rzang)

Posted by jl...@apache.org.
AMBARI-15308. UI: ability to perform bulk add host components <fix> (Joe Wang via rzang)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/86d37806
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/86d37806
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/86d37806

Branch: refs/heads/AMBARI-13364
Commit: 86d37806535715ae38a4062ab83b3e6e57877f38
Parents: 1986078
Author: Richard Zang <rz...@apache.org>
Authored: Tue Mar 8 10:55:09 2016 -0800
Committer: Richard Zang <rz...@apache.org>
Committed: Tue Mar 8 10:55:09 2016 -0800

----------------------------------------------------------------------
 ambari-web/app/views/main/host/hosts_table_menu_view.js | 11 -----------
 1 file changed, 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/86d37806/ambari-web/app/views/main/host/hosts_table_menu_view.js
----------------------------------------------------------------------
diff --git a/ambari-web/app/views/main/host/hosts_table_menu_view.js b/ambari-web/app/views/main/host/hosts_table_menu_view.js
index 0d119f7..5670814 100644
--- a/ambari-web/app/views/main/host/hosts_table_menu_view.js
+++ b/ambari-web/app/views/main/host/hosts_table_menu_view.js
@@ -145,17 +145,6 @@ App.HostTableMenuView = Em.View.extend({
               serviceName: content.serviceName,
               componentNameFormatted: content.componentNameFormatted
             })
-          }),
-          O.create({
-            label: Em.I18n.t('common.delete'),
-            delete: true,
-            operationData: O.create({
-              action: 'DELETE',
-              message: Em.I18n.t('common.delete'),
-              componentName: content.componentName,
-              serviceName: content.serviceName,
-              componentNameFormatted: content.componentNameFormatted
-            })
           })
         ])
       }


[45/51] [abbrv] ambari git commit: AMBARI-14435: Parameterize distro-specific stack information for ZOOKEEPER (Juanjo Marron via dili)

Posted by jl...@apache.org.
AMBARI-14435: Parameterize distro-specific stack information for ZOOKEEPER (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/e5df4144
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/e5df4144
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/e5df4144

Branch: refs/heads/AMBARI-13364
Commit: e5df414480f43c1cb0217a471af23026c839b636
Parents: e5d261f
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:19:21 2016 -0500
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 15:05:42 2016 -0800

----------------------------------------------------------------------
 .../3.4.5.2.0/package/scripts/params_linux.py        | 15 ++++++++-------
 .../3.4.5.2.0/package/scripts/status_params.py       | 10 +++++++---
 .../ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py |  4 ++--
 .../3.4.5.2.0/package/scripts/zookeeper_client.py    |  5 +++--
 .../3.4.5.2.0/package/scripts/zookeeper_server.py    |  5 +++--
 .../3.4.5.2.0/package/scripts/zookeeper_service.py   |  6 +++---
 .../stacks/HDP/2.0.6/configuration/cluster-env.xml   | 10 ++++++++++
 7 files changed, 36 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/e5df4144/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
index 9ff9125..de31c8b 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_linux.py
@@ -33,8 +33,10 @@ tmp_dir = Script.get_tmp_dir()
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 stack_version_formatted = format_stack_version(stack_version_unformatted)
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
 
 stack_name = default("/hostLevelParams/stack_name", None)
+stack_dir = config['configurations']['cluster-env']['stack_dir']
 current_version = default("/hostLevelParams/current_version", None)
 component_directory = status_params.component_directory
 
@@ -48,14 +50,13 @@ zk_cli_shell = "/usr/lib/zookeeper/bin/zkCli.sh"
 config_dir = "/etc/zookeeper/conf"
 zk_smoke_out = os.path.join(tmp_dir, "zkSmoke.out")
 
-# hadoop parameters for 2.2+
-if Script.is_stack_greater_or_equal("2.2"):
-  zk_home = format("/usr/hdp/current/{component_directory}")
-  zk_bin = format("/usr/hdp/current/{component_directory}/bin")
-  zk_cli_shell = format("/usr/hdp/current/{component_directory}/bin/zkCli.sh")
+# hadoop parameters for stack_version_ru_support+
+if Script.is_stack_greater_or_equal(stack_version_ru_support):
+  zk_home = format("{stack_dir}/current/{component_directory}")
+  zk_bin = format("{stack_dir}/current/{component_directory}/bin")
+  zk_cli_shell = format("{stack_dir}/current/{component_directory}/bin/zkCli.sh")
   config_dir = status_params.config_dir
 
-
 zk_user = config['configurations']['zookeeper-env']['zk_user']
 hostname = config['hostname']
 user_group = config['configurations']['cluster-env']['user_group']
@@ -98,4 +99,4 @@ kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executab
 if ('zookeeper-log4j' in config['configurations']) and ('content' in config['configurations']['zookeeper-log4j']):
   log4j_props = config['configurations']['zookeeper-log4j']['content']
 else:
-  log4j_props = None
\ No newline at end of file
+  log4j_props = None

http://git-wip-us.apache.org/repos/asf/ambari/blob/e5df4144/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
index d18e4d7..e349d48 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/status_params.py
@@ -24,7 +24,7 @@ from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
 # a map of the Ambari role to the component name
-# for use with /usr/hdp/current/<component>
+# for use with <stack_dir>/current/<component>
 SERVER_ROLE_DIRECTORY_MAP = {
   'ZOOKEEPER_SERVER' : 'zookeeper-server',
   'ZOOKEEPER_CLIENT' : 'zookeeper-client'
@@ -47,6 +47,10 @@ else:
   tmp_dir = Script.get_tmp_dir()
   zk_user =  config['configurations']['zookeeper-env']['zk_user']
 
+  # Stack related params
+  stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+  stack_dir = config['configurations']['cluster-env']['stack_dir']
+
   config_dir = "/etc/zookeeper/conf"
-  if Script.is_stack_greater_or_equal("2.2"):
-    config_dir = format("/usr/hdp/current/{component_directory}/conf")
+  if Script.is_stack_greater_or_equal(stack_version_ru_support):
+    config_dir = format("{stack_dir}/current/{component_directory}/conf")

http://git-wip-us.apache.org/repos/asf/ambari/blob/e5df4144/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
index 5712ce4..cebc3b8 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper.py
@@ -35,8 +35,8 @@ def zookeeper(type = None, upgrade_type=None):
   if type == 'server':
     # This path may be missing after an Ambari upgrade. We need to create it before any configs
     # are applied.
-    if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version\
-      and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if upgrade_type is None and not os.path.exists(os.path.join(params.stack_dir, "current", "zookeeper-server")) and params.current_version\
+      and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.current_version)
       stack_select.select("zookeeper-server", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e5df4144/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
index 25ace24..854d583 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_client.py
@@ -54,7 +54,8 @@ class ZookeeperClient(Script):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class ZookeeperClientLinux(ZookeeperClient):
   def get_stack_to_component(self):
-    return {"HDP": "zookeeper-client"}
+    import params
+    return {params.stack_name: "zookeeper-client"}
 
   def install(self, env):
     self.install_packages(env)
@@ -65,7 +66,7 @@ class ZookeeperClientLinux(ZookeeperClient):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.version)
       stack_select.select("zookeeper-client", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e5df4144/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
index d5b6898..163ebb7 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_server.py
@@ -63,7 +63,8 @@ class ZookeeperServer(Script):
 class ZookeeperServerLinux(ZookeeperServer):
 
   def get_stack_to_component(self):
-    return {"HDP": "zookeeper-server"}
+    import params
+    return {params.stack_name: "zookeeper-server"}
 
   def install(self, env):
     self.install_packages(env)
@@ -74,7 +75,7 @@ class ZookeeperServerLinux(ZookeeperServer):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "zookeeper", params.version)
       stack_select.select("zookeeper-server", params.version)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/e5df4144/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
index 6fe0772..0afea17 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/zookeeper_service.py
@@ -32,8 +32,8 @@ def zookeeper_service(action='start', upgrade_type=None):
   import params
 
   # This path may be missing after an Ambari upgrade. We need to create it.
-  if upgrade_type is None and not os.path.exists("/usr/hdp/current/zookeeper-server") and params.current_version \
-    and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+  if upgrade_type is None and not os.path.exists(os.path.join(params.stack_dir, "current", "zookeeper-server")) and params.current_version \
+    and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
     conf_select.select(params.stack_name, "zookeeper", params.current_version)
     stack_select.select("zookeeper-server", params.version)
 
@@ -68,4 +68,4 @@ def zookeeper_service(action='start', rolling_restart=False):
   if action == 'start':
     Service(params.zookeeper_win_service_name, action="start")
   elif action == 'stop':
-    Service(params.zookeeper_win_service_name, action="stop")
\ No newline at end of file
+    Service(params.zookeeper_win_service_name, action="stop")

http://git-wip-us.apache.org/repos/asf/ambari/blob/e5df4144/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
index 784a88f..8ac1b5b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml
@@ -21,6 +21,16 @@
 -->
 
 <configuration>
+     <property>
+        <name>stack_dir</name>
+        <value>/usr/hdp</value>
+        <description>Directory prefix for stacks installation</description>
+    </property>
+    <property>
+        <name>stack_version_ru_support</name>
+        <value>2.2.0.0</value>
+        <description>Stack version from which rolling upgrade is supported and installation layout changed</description>
+    </property>
     <property>
         <name>security_enabled</name>
         <value>false</value>
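
A minimal sketch of how a params file consumes the two new cluster-env properties (mirroring params_linux.py and status_params.py above; the defaults noted are the values from this XML, and the component name is illustrative):

# Sketch: assumes the standard Script/config plumbing used by these scripts.
from resource_management.libraries.functions.format import format
from resource_management.libraries.script.script import Script

config = Script.get_config()
stack_dir = config['configurations']['cluster-env']['stack_dir']  # default: /usr/hdp
stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']  # default: 2.2.0.0

component_directory = "zookeeper-server"  # illustrative
config_dir = "/etc/zookeeper/conf"
if Script.is_stack_greater_or_equal(stack_version_ru_support):
  config_dir = format("{stack_dir}/current/{component_directory}/conf")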


[29/51] [abbrv] ambari git commit: AMBARI-15343. Alias for metrics in ambari-metrics-grafana. (Prajwal Rao via yusaku)

Posted by jl...@apache.org.
AMBARI-15343. Alias for metrics in ambari-metrics-grafana. (Prajwal Rao via yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5e69da9d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5e69da9d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5e69da9d

Branch: refs/heads/AMBARI-13364
Commit: 5e69da9d0c5ca21d6e046cf7c7ab5b9c21063fba
Parents: ec4b1d1
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Tue Mar 8 21:23:04 2016 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Tue Mar 8 21:23:04 2016 -0800

----------------------------------------------------------------------
 .../ambari-metrics/datasource.js                      |  7 +++++--
 .../ambari-metrics/partials/query.editor.html         | 14 ++++++++++++++
 2 files changed, 19 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5e69da9d/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
index 3e665e0..fa83b47 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/datasource.js
@@ -94,6 +94,9 @@ define([
           };
           var self = this;
           var getMetricsData = function (target) {
+            var alias = target.alias ? target.alias : target.metric;
+            if(!_.isEmpty(templateSrv.variables) && templateSrv.variables[0].query === "yarnqueues") {
+              alias = alias + ' on ' + target.qmetric; }
             return function (res) {
               console.log('processing metric ' + target.metric);
               if (!res.metrics[0] || target.hide) {
@@ -106,12 +109,12 @@ define([
               var timeSeries = {};
               if (target.hosts === undefined || target.hosts.trim() === "") {
                 timeSeries = {
-                  target: res.metrics[0].metricname + hostLegend,
+                  target: alias + hostLegend,
                   datapoints: []
                 };
               } else {
                 timeSeries = {
-                  target: target.metric + ' on ' + target.hosts,
+                  target: alias + ' on ' + target.hosts,
                   datapoints: []
                 };
               }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5e69da9d/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html
index f51866e..fed38ac 100644
--- a/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html
+++ b/ambari-metrics/ambari-metrics-grafana/ambari-metrics/partials/query.editor.html
@@ -117,6 +117,20 @@
 <div class="tight-form">
     <ul class="tight-form-list" role="menu">
         <li class="tight-form-item tight-form-align" style="width: 86px">
+            Alias
+        </li>
+        <li>
+            <input type="text" class="tight-form-input input-large"
+                   ng-model="target.alias"
+                   spellcheck='false'
+                   placeholder="series alias"
+                   data-min-length=0 data-items=100
+                   ng-blur="targetBlur()"></input>
+            <a bs-tooltip="target.errors.metric" style="color: rgb(229, 189, 28)" ng-show="target.errors.metric">
+                <i class="fa fa-warning"></i>
+            </a>
+        </li>
+        <li class="tight-form-item tight-form-align" style="width: 86px">
             <editor-checkbox text="Rate" model="target.shouldComputeRate" change="targetBlur()"></editor-checkbox>
         </li>
 

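Restated as a small Python sketch, the series-naming rule this change introduces (field names follow the JavaScript above; this is a sketch of the behavior, not the datasource API):

# Sketch: restates the alias fallback from datasource.js in Python.
def series_label(target, template_variables, host_legend=""):
  # A user-supplied alias wins; otherwise fall back to the raw metric name.
  alias = target.get("alias") or target["metric"]
  # On the yarnqueues templated dashboard, qualify the label with the queue metric.
  if template_variables and template_variables[0].get("query") == "yarnqueues":
    alias = alias + " on " + target["qmetric"]
  # With explicit hosts, label per host; otherwise append the generic host legend.
  if target.get("hosts", "").strip():
    return alias + " on " + target["hosts"]
  return alias + host_legend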

[23/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
new file mode 100644
index 0000000..c94d956
--- /dev/null
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_select.py
@@ -0,0 +1,307 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import sys
+import re
+from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.script.script import Script
+from resource_management.core.shell import call
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.version_select_util import get_versions_from_stack_root
+
+STACK_SELECT = '/usr/bin/hdp-select'
+STACK_SELECT_PREFIX = ('ambari-python-wrap', STACK_SELECT)
+
+# hdp-select set oozie-server 2.2.0.0-1234
+TEMPLATE = STACK_SELECT_PREFIX + ('set',)
+
+# a mapping of Ambari server role to hdp-select component name for all
+# non-clients
+SERVER_ROLE_DIRECTORY_MAP = {
+  'ACCUMULO_MASTER' : 'accumulo-master',
+  'ACCUMULO_MONITOR' : 'accumulo-monitor',
+  'ACCUMULO_GC' : 'accumulo-gc',
+  'ACCUMULO_TRACER' : 'accumulo-tracer',
+  'ACCUMULO_TSERVER' : 'accumulo-tablet',
+  'ATLAS_SERVER' : 'atlas-server',
+  'FLUME_HANDLER' : 'flume-server',
+  'FALCON_SERVER' : 'falcon-server',
+  'NAMENODE' : 'hadoop-hdfs-namenode',
+  'DATANODE' : 'hadoop-hdfs-datanode',
+  'SECONDARY_NAMENODE' : 'hadoop-hdfs-secondarynamenode',
+  'NFS_GATEWAY' : 'hadoop-hdfs-nfs3',
+  'JOURNALNODE' : 'hadoop-hdfs-journalnode',
+  'HBASE_MASTER' : 'hbase-master',
+  'HBASE_REGIONSERVER' : 'hbase-regionserver',
+  'HIVE_METASTORE' : 'hive-metastore',
+  'HIVE_SERVER' : 'hive-server2',
+  'WEBHCAT_SERVER' : 'hive-webhcat',
+  'KAFKA_BROKER' : 'kafka-broker',
+  'KNOX_GATEWAY' : 'knox-server',
+  'OOZIE_SERVER' : 'oozie-server',
+  'RANGER_ADMIN' : 'ranger-admin',
+  'RANGER_USERSYNC' : 'ranger-usersync',
+  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
+  'SPARK_THRIFTSERVER' : 'spark-thriftserver',
+  'NIMBUS' : 'storm-nimbus',
+  'SUPERVISOR' : 'storm-supervisor',
+  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
+  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
+  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
+  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
+  'ZOOKEEPER_SERVER' : 'zookeeper-server',
+
+  # ZKFC is tied to NN since it doesn't have its own component in hdp-select and there is
+  # a requirement that the ZKFC is installed on each NN
+  'ZKFC' : 'hadoop-hdfs-namenode'
+}
+
+# mapping of service check to hdp-select component
+SERVICE_CHECK_DIRECTORY_MAP = {
+  "HDFS_SERVICE_CHECK" : "hadoop-client",
+  "TEZ_SERVICE_CHECK" : "hadoop-client",
+  "PIG_SERVICE_CHECK" : "hadoop-client",
+  "HIVE_SERVICE_CHECK" : "hadoop-client",
+  "OOZIE_SERVICE_CHECK" : "hadoop-client",
+  "MAHOUT_SERVICE_CHECK" : "mahout-client"
+}
+
+# /usr/hdp/current/hadoop-client/[bin|sbin|libexec|lib]
+# /usr/hdp/2.3.0.0-1234/hadoop/[bin|sbin|libexec|lib]
+HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"
+
+# /usr/hdp/current/hadoop-client
+# /usr/hdp/2.3.0.0-1234/hadoop
+HADOOP_HOME_DIR_TEMPLATE = "/usr/hdp/{0}/{1}"
+
+HADOOP_DIR_DEFAULTS = {
+  "home": "/usr/lib/hadoop",
+  "libexec": "/usr/lib/hadoop/libexec",
+  "sbin": "/usr/lib/hadoop/sbin",
+  "bin": "/usr/bin",
+  "lib": "/usr/lib/hadoop/lib"
+}
+
+def select_all(version_to_select):
+  """
+  Executes hdp-select on every component for the specified version. If the value passed in is a
+  stack version such as "2.3", then this will find the latest installed version which
+  could be "2.3.0.0-9999". If a version is specified instead, such as 2.3.0.0-1234, it will use
+  that exact version.
+  :param version_to_select: the version to hdp-select on, such as "2.3" or "2.3.0.0-1234"
+  """
+  # it's an error, but it shouldn't really stop anything from working
+  if version_to_select is None:
+    Logger.error("Unable to execute hdp-select after installing because there was no version specified")
+    return
+
+  Logger.info("Executing hdp-select set all on {0}".format(version_to_select))
+
+  command = format('{sudo} /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^{version_to_select} | tail -1`')
+  only_if_command = format('ls -d /usr/hdp/{version_to_select}*')
+  Execute(command, only_if = only_if_command)
+
+
+def select(component, version):
+  """
+  Executes hdp-select on the specific component and version. Some global
+  variables that are imported via params/status_params/params_linux will need
+  to be recalculated after the hdp-select. However, Python does not re-import
+  existing modules. The only way to ensure that the configuration variables are
+  recalculated is to call reload(...) on each module that has global parameters.
+  After invoking hdp-select, this function will also reload params, status_params,
+  and params_linux.
+  :param component: the hdp-select component, such as oozie-server. If "all", then all components
+  will be updated.
+  :param version: the version to set the component to, such as 2.2.0.0-1234
+  """
+  command = TEMPLATE + (component, version)
+  Execute(command, sudo=True)
+
+  # don't trust the ordering of modules:
+  # 1) status_params
+  # 2) params_linux
+  # 3) params
+  modules = sys.modules
+  param_modules = "status_params", "params_linux", "params"
+  for moduleName in param_modules:
+    if moduleName in modules:
+      module = modules.get(moduleName)
+      reload(module)
+      Logger.info("After {0}, reloaded module {1}".format(command, moduleName))
+
+
+def get_role_component_current_stack_version():
+  """
+  Gets the current HDP version of the component that this role command is for.
+  :return:  the current HDP version of the specified component or None
+  """
+  stack_select_component = None
+  role = default("/role", "")
+  role_command =  default("/roleCommand", "")
+
+  if role in SERVER_ROLE_DIRECTORY_MAP:
+    stack_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
+  elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
+    stack_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]
+
+  if stack_select_component is None:
+    return None
+
+  current_stack_version = get_stack_version(stack_select_component)
+
+  if current_stack_version is None:
+    Logger.warning("Unable to determine hdp-select version for {0}".format(
+      stack_select_component))
+  else:
+    Logger.info("{0} is currently at version {1}".format(
+      stack_select_component, current_stack_version))
+
+  return current_stack_version
+
+
+def get_hadoop_dir(target, force_latest_on_upgrade=False):
+  """
+  Return the hadoop shared directory in the following override order
+  1. Use default for 2.1 and lower
+  2. If 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}
+  3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}.
+  However, if the upgrade has not yet invoked hdp-select, return the current
+  version of the component.
+  :param target: the target directory
+  :param force_latest_on_upgrade: if True, then this will return the "current" directory
+  without the HDP version built into the path, such as /usr/hdp/current/hadoop-client
+  """
+
+  if target not in HADOOP_DIR_DEFAULTS:
+    raise Fail("Target {0} not defined".format(target))
+
+  hadoop_dir = HADOOP_DIR_DEFAULTS[target]
+
+  if Script.is_stack_greater_or_equal("2.2"):
+    # home uses a different template
+    if target == "home":
+      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", "hadoop-client")
+    else:
+      hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
+
+    # if we are not forcing "current" for HDP 2.2, then attempt to determine
+    # if the exact version needs to be returned in the directory
+    if not force_latest_on_upgrade:
+      stack_info = _get_upgrade_stack()
+
+      if stack_info is not None:
+        stack_version = stack_info[1]
+
+        # determine if hdp-select has been run and if not, then use the current
+        # hdp version until this component is upgraded
+        current_stack_version = get_role_component_current_stack_version()
+        if current_stack_version is not None and stack_version != current_stack_version:
+          stack_version = current_stack_version
+
+        if target == "home":
+          # home uses a different template
+          hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
+        else:
+          hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
+
+  return hadoop_dir
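+
+  # Examples (illustrative, assuming the usual /usr/hdp layout and an
+  # installed build 2.3.0.0-2557):
+  #   get_hadoop_dir("bin") -> "/usr/hdp/current/hadoop-client/bin"
+  # and, during an upgrade where hdp-select has not yet run for this component:
+  #   get_hadoop_dir("bin") -> "/usr/hdp/2.3.0.0-2557/hadoop/bin"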
+
+def get_hadoop_dir_for_stack_version(target, stack_version):
+  """
+  Return the hadoop shared directory for the provided stack version. This is necessary
+  when folder paths of the downgrade-source stack version are needed after hdp-select.
+  :param target: the target directory
+  :param stack_version: the stack version to get the hadoop directory for
+  """
+
+  if target not in HADOOP_DIR_DEFAULTS:
+    raise Fail("Target {0} not defined".format(target))
+
+  hadoop_dir = HADOOP_DIR_DEFAULTS[target]
+
+  formatted_stack_version = format_stack_version(stack_version)
+  if Script.is_stack_greater_or_equal_to(formatted_stack_version, "2.2"):
+    # home uses a different template
+    if target == "home":
+      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
+    else:
+      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
+
+  return hadoop_dir
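+
+  # Example (illustrative): get_hadoop_dir_for_stack_version("bin", "2.3.0.0-2557")
+  # -> "/usr/hdp/2.3.0.0-2557/hadoop/bin"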
+
+
+def _get_upgrade_stack():
+  """
+  Gets the stack name and stack version if an upgrade is currently in progress.
+  :return:  the stack name and stack version as a tuple, or None if an
+  upgrade is not in progress.
+  """
+  from resource_management.libraries.functions.default import default
+  direction = default("/commandParams/upgrade_direction", None)
+  stack_name = default("/hostLevelParams/stack_name", None)
+  stack_version = default("/commandParams/version", None)
+
+  if direction and stack_name and stack_version:
+    return (stack_name, stack_version)
+
+  return None
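+
+  # Example (illustrative; the version is assumed): during a stack upgrade
+  # this may return a tuple such as ("HDP", "2.3.2.0-2950"); outside of an
+  # upgrade it returns None.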
+
+
+def get_stack_versions(stack_root):
+  """
+  Gets list of stack versions installed on the host.
+  By default, a call to "hdp-select versions" is made to get the list of installed stack versions.
+  As a fallback, the list of installed versions is collected from the stack version directories under the stack install root.
+  :param stack_root: Stack install root
+  :return: Returns list of installed stack versions.
+  """
+  code, out = call(STACK_SELECT_PREFIX + ('versions',))
+  versions = []
+  if 0 == code:
+    for line in out.splitlines():
+      versions.append(line.rstrip('\n'))
+  if not versions:
+    versions = get_versions_from_stack_root(stack_root)
+  return versions
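+
+# Example (illustrative; the build numbers are assumed): this may return
+# ["2.2.0.0-2041", "2.3.0.0-2557"]; if "hdp-select versions" fails, the same
+# list is derived from the version directories found under stack_root.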
+
+def get_stack_version_before_install(component_name):
+  """
+  Works in a similar way to 'hdp-select status <component>',
+  but also works for packages that are not yet installed.
+
+  Note: this will not work during an initial install.
+  """
+  component_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", component_name)
+  if os.path.islink(component_dir):
+    stack_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))
+    match = re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+-[0-9]+', stack_version)  # escape dots so only real version strings match
+    if match is None:
+      Logger.info('Failed to get extracted version with hdp-select in method get_stack_version_before_install')
+      return None # lazy fail
+    return stack_version
+  else:
+    return None
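+
+# Example (illustrative; paths are assumed): if /usr/hdp/current/hadoop-client
+# is a symlink to /usr/hdp/2.3.0.0-2557/hadoop, this returns "2.3.0.0-2557"
+# for component name "hadoop-client".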

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/version.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version.py b/ambari-common/src/main/python/resource_management/libraries/functions/version.py
index 6269989..2500430 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version.py
@@ -34,7 +34,7 @@ def _normalize(v, desired_segments=0):
   return [int(x) for x in v_list]
 
 
-def format_hdp_stack_version(input):
+def format_stack_version(input):
   """
   :param input: Input string, e.g. "2.2" or "GlusterFS", or "2.0.6.GlusterFS", or "2.2.0.1-885"
   :return: Returns a well-formatted HDP stack version of the form #.#.#.# as a string.
@@ -67,11 +67,11 @@ def compare_versions(version1, version2, format=False):
   Stack Version 2.0.6.0 vs 2.2.0.0
   :param version1: First parameter for version
   :param version2: Second parameter for version
-  :param format: optionally format the versions via format_hdp_stack_version before comparing them
+  :param format: optionally format the versions via format_stack_version before comparing them
   :return: Returns -1 if version1 is before version2, 0 if they are equal, and 1 if version1 is after version2
   """
-  v1 = version1 if not format else format_hdp_stack_version(version1)
-  v2 = version2 if not format else format_hdp_stack_version(version2)
+  v1 = version1 if not format else format_stack_version(version1)
+  v2 = version2 if not format else format_stack_version(version2)
 
   max_segments = max(len(v1.split(".")), len(v2.split(".")))
   return cmp(_normalize(v1, desired_segments=max_segments), _normalize(v2, desired_segments=max_segments))
\ No newline at end of file
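
A minimal usage sketch of the renamed helpers (illustrative; the version
strings below are assumed, not taken from a real cluster):

  from resource_management.libraries.functions.version import (
      format_stack_version, compare_versions)

  format_stack_version("2.2")                              # -> "2.2.0.0"
  compare_versions("2.2.0.0", "2.0.6.0")                   # -> 1 (first is newer)
  compare_versions("2.2.0.1-885", "2.2.0.0", format=True)  # -> 1 (build suffix stripped first)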

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
index f1a484b..95c5cba 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/version_select_util.py
@@ -47,23 +47,23 @@ def get_component_version(stack_name, component_name):
   if stack_name == "HDP":
     tmpfile = tempfile.NamedTemporaryFile()
 
-    get_hdp_comp_version_cmd = ""
+    get_stack_comp_version_cmd = ""
     try:
       # This is necessary because Ubuntu returns "stdin: is not a tty", see AMBARI-8088
       with open(tmpfile.name, 'r') as file:
-        get_hdp_comp_version_cmd = '/usr/bin/hdp-select status %s > %s' % (component_name, tmpfile.name)
-        code, stdoutdata = shell.call(get_hdp_comp_version_cmd)
+        get_stack_comp_version_cmd = '/usr/bin/hdp-select status %s > %s' % (component_name, tmpfile.name)
+        code, stdoutdata = shell.call(get_stack_comp_version_cmd)
         out = file.read()
 
       if code != 0 or out is None:
         raise Exception("Code is nonzero or output is empty")
 
-      Logger.debug("Command: %s\nOutput: %s" % (get_hdp_comp_version_cmd, str(out)))
+      Logger.debug("Command: %s\nOutput: %s" % (get_stack_comp_version_cmd, str(out)))
       matches = re.findall(r"([\d\.]+\-\d+)", out)
       version = matches[0] if matches and len(matches) > 0 else None
     except Exception, e:
       Logger.error("Could not determine HDP version for component %s by calling '%s'. Return Code: %s, Output: %s." %
-                   (component_name, get_hdp_comp_version_cmd, str(code), str(out)))
+                   (component_name, get_stack_comp_version_cmd, str(code), str(out)))
   elif stack_name == "HDPWIN":
     pass
   elif stack_name == "GlusterFS":

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-common/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/script/script.py b/ambari-common/src/main/python/resource_management/libraries/script/script.py
index 5e76562..a8098a0 100644
--- a/ambari-common/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-common/src/main/python/resource_management/libraries/script/script.py
@@ -42,7 +42,7 @@ from resource_management.core.exceptions import Fail, ClientComponentHasNoStatus
 from resource_management.core.resources.packaging import Package
 from resource_management.libraries.functions.version_select_util import get_component_version
 from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.functions import packages_analyzer
 from resource_management.libraries.script.config_dictionary import ConfigDictionary, UnknownConfiguration
@@ -52,7 +52,7 @@ from contextlib import closing
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 
 if OSCheck.is_windows_family():
-  from resource_management.libraries.functions.install_hdp_msi import install_windows_msi
+  from resource_management.libraries.functions.install_windows_msi import install_windows_msi
   from resource_management.libraries.functions.reload_windows_env import reload_windows_env
   from resource_management.libraries.functions.zip_archive import archive_dir
   from resource_management.libraries.resources import Msi
@@ -177,8 +177,8 @@ class Script(object):
     """
     from resource_management.libraries.functions.default import default
     stack_version_unformatted = str(default("/hostLevelParams/stack_version", ""))
-    hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
-    if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
+    stack_version_formatted = format_stack_version(stack_version_unformatted)
+    if stack_version_formatted != "" and compare_versions(stack_version_formatted, '2.2') >= 0:
       if command_name.lower() == "status":
         request_version = default("/commandParams/request_version", None)
         if request_version is not None:
@@ -259,13 +259,13 @@ class Script(object):
     
     before the call. However, it takes a bit of time, so it is better to avoid.
 
-    :return: hdp version including the build number. e.g.: 2.3.4.0-1234.
+    :return: stack version including the build number. e.g.: 2.3.4.0-1234.
     """
     # preferred way is to get the actual selected version of current component
     component_name = self.get_component_name()
     if not Script.stack_version_from_distro_select and component_name:
-      from resource_management.libraries.functions import hdp_select
-      Script.stack_version_from_distro_select = hdp_select.get_hdp_version_before_install(component_name)
+      from resource_management.libraries.functions import stack_select
+      Script.stack_version_from_distro_select = stack_select.get_stack_version_before_install(component_name)
       
     # if hdp-select has not yet been done (situations like first install), we can use hdp-select version itself.
     if not Script.stack_version_from_distro_select:
@@ -329,7 +329,7 @@ class Script(object):
     return default("/hostLevelParams/stack_name", None)
 
   @staticmethod
-  def get_hdp_stack_version():
+  def get_stack_version():
     """
     Gets the normalized version of the HDP stack in the form #.#.#.# if it is
     present on the configurations sent.
@@ -348,7 +348,7 @@ class Script(object):
     if stack_version_unformatted is None or stack_version_unformatted == '':
       return None
 
-    return format_hdp_stack_version(stack_version_unformatted)
+    return format_stack_version(stack_version_unformatted)
 
 
   @staticmethod
@@ -360,57 +360,57 @@ class Script(object):
 
 
   @staticmethod
-  def is_hdp_stack_greater(formatted_hdp_stack_version, compare_to_version):
+  def is_stack_greater(stack_version_formatted, compare_to_version):
     """
-    Gets whether the provided formatted_hdp_stack_version (normalized)
+    Gets whether the provided stack_version_formatted (normalized)
     is greater than the specified stack version
-    :param formatted_hdp_stack_version: the version of stack to compare
+    :param stack_version_formatted: the version of stack to compare
     :param compare_to_version: the version of stack to compare to
     :return: True if the command's stack is greater than the specified version
     """
-    if formatted_hdp_stack_version is None or formatted_hdp_stack_version == "":
+    if stack_version_formatted is None or stack_version_formatted == "":
       return False
 
-    return compare_versions(formatted_hdp_stack_version, compare_to_version) > 0
+    return compare_versions(stack_version_formatted, compare_to_version) > 0
 
   @staticmethod
-  def is_hdp_stack_greater_or_equal(compare_to_version):
+  def is_stack_greater_or_equal(compare_to_version):
     """
     Gets whether the hostLevelParams/stack_version, after being normalized,
     is greater than or equal to the specified stack version
     :param compare_to_version: the version to compare to
     :return: True if the command's stack is greater than or equal the specified version
     """
-    return Script.is_hdp_stack_greater_or_equal_to(Script.get_hdp_stack_version(), compare_to_version)
+    return Script.is_stack_greater_or_equal_to(Script.get_stack_version(), compare_to_version)
 
   @staticmethod
-  def is_hdp_stack_greater_or_equal_to(formatted_hdp_stack_version, compare_to_version):
+  def is_stack_greater_or_equal_to(stack_version_formatted, compare_to_version):
     """
-    Gets whether the provided formatted_hdp_stack_version (normalized)
+    Gets whether the provided stack_version_formatted (normalized)
     is greater than or equal to the specified stack version
-    :param formatted_hdp_stack_version: the version of stack to compare
+    :param stack_version_formatted: the version of stack to compare
     :param compare_to_version: the version of stack to compare to
     :return: True if the command's stack is greater than or equal to the specified version
     """
-    if formatted_hdp_stack_version is None or formatted_hdp_stack_version == "":
+    if stack_version_formatted is None or stack_version_formatted == "":
       return False
 
-    return compare_versions(formatted_hdp_stack_version, compare_to_version) >= 0
+    return compare_versions(stack_version_formatted, compare_to_version) >= 0
 
   @staticmethod
-  def is_hdp_stack_less_than(compare_to_version):
+  def is_stack_less_than(compare_to_version):
     """
     Gets whether the hostLevelParams/stack_version, after being normalized,
     is less than the specified stack version
     :param compare_to_version: the version to compare to
     :return: True if the command's stack is less than the specified version
     """
-    hdp_stack_version = Script.get_hdp_stack_version()
+    stack_version_formatted = Script.get_stack_version()
 
-    if hdp_stack_version is None:
+    if stack_version_formatted is None:
       return False
 
-    return compare_versions(hdp_stack_version, compare_to_version) < 0
+    return compare_versions(stack_version_formatted, compare_to_version) < 0
 
   def install(self, env):
     """

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_client.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_client.py
index febee1c..fd975c5 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_client.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_client.py
@@ -21,7 +21,7 @@ limitations under the License.
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import ClientComponentHasNoStatus
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.script.script import Script
 
 from accumulo_configuration import setup_conf_dir
@@ -54,12 +54,12 @@ class AccumuloClient(Script):
 
     # this function should not execute if the version can't be determined or
     # is not at least HDP 2.2.0.0
-    if Script.is_hdp_stack_less_than("2.2"):
+    if Script.is_stack_less_than("2.2"):
       return
 
     Logger.info("Executing Accumulo Client Upgrade pre-restart")
     conf_select.select(params.stack_name, "accumulo", params.version)
-    hdp_select.select("accumulo-client", params.version)
+    stack_select.select("accumulo-client", params.version)
 
 if __name__ == "__main__":
   AccumuloClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
index eda333d..12ca388 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
@@ -22,7 +22,7 @@ from resource_management.core.logger import Logger
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.security_commons import build_expectations
 from resource_management.libraries.functions.security_commons import cached_kinit_executor
 from resource_management.libraries.functions.security_commons import get_params_from_filesystem
@@ -37,7 +37,7 @@ class AccumuloScript(Script):
 
   # a mapping between the component name used by these scripts and the name
   # which is used by hdp-select
-  COMPONENT_TO_HDP_SELECT_MAPPING = {
+  COMPONENT_TO_STACK_SELECT_MAPPING = {
     "gc" : "accumulo-gc",
     "master" : "accumulo-master",
     "monitor" : "accumulo-monitor",
@@ -55,11 +55,11 @@ class AccumuloScript(Script):
     :return:  the name of the component on the HDP stack which is used by
               hdp-select
     """
-    if self.component not in self.COMPONENT_TO_HDP_SELECT_MAPPING:
+    if self.component not in self.COMPONENT_TO_STACK_SELECT_MAPPING:
       return None
 
-    hdp_component = self.COMPONENT_TO_HDP_SELECT_MAPPING[self.component]
-    return {"HDP": hdp_component}
+    stack_component = self.COMPONENT_TO_STACK_SELECT_MAPPING[self.component]
+    return {"HDP": stack_component}
 
 
   def install(self, env):
@@ -102,21 +102,21 @@ class AccumuloScript(Script):
 
     # this function should not execute if the version can't be determined or
     # is not at least HDP 2.2.0.0
-    if Script.is_hdp_stack_less_than("2.2"):
+    if Script.is_stack_less_than("2.2"):
       return
 
-    if self.component not in self.COMPONENT_TO_HDP_SELECT_MAPPING:
+    if self.component not in self.COMPONENT_TO_STACK_SELECT_MAPPING:
       Logger.info("Unable to execute an upgrade for unknown component {0}".format(self.component))
       raise Fail("Unable to execute an upgrade for unknown component {0}".format(self.component))
 
-    hdp_component = self.COMPONENT_TO_HDP_SELECT_MAPPING[self.component]
+    stack_component = self.COMPONENT_TO_STACK_SELECT_MAPPING[self.component]
 
-    Logger.info("Executing Accumulo Upgrade pre-restart for {0}".format(hdp_component))
+    Logger.info("Executing Accumulo Upgrade pre-restart for {0}".format(stack_component))
     conf_select.select(params.stack_name, "accumulo", params.version)
-    hdp_select.select(hdp_component, params.version)
+    stack_select.select(stack_component, params.version)
 
     # some accumulo components depend on the client, so update that too
-    hdp_select.select("accumulo-client", params.version)
+    stack_select.select("accumulo-client", params.version)
 
 
   def security_status(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
index 993d4cf..a8aebbf 100644
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
@@ -18,10 +18,10 @@ limitations under the License.
 
 """
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_bare_principal import get_bare_principal
 from resource_management.libraries.script.script import Script
@@ -39,10 +39,10 @@ security_enabled = status_params.security_enabled
 stack_name = default("/hostLevelParams/stack_name", None)
 version = default("/commandParams/version", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 has_secure_user_auth = False
-if Script.is_hdp_stack_greater_or_equal("2.3"):
+if Script.is_stack_greater_or_equal("2.3"):
   has_secure_user_auth = True
 
 # configuration directories
@@ -50,8 +50,8 @@ conf_dir = status_params.conf_dir
 server_conf_dir = status_params.server_conf_dir
 
 # service locations
-hadoop_prefix = hdp_select.get_hadoop_dir("home")
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_prefix = stack_select.get_hadoop_dir("home")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 zookeeper_home = "/usr/hdp/current/zookeeper-client"
 
 # the configuration direction for HDFS/YARN/MapR is the hadoop config

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
index 59e0562..d000846 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/atlas_client.py
@@ -21,7 +21,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 
 from metadata import metadata
 
@@ -37,9 +37,9 @@ class AtlasClient(Script):
   #   import params
   #   env.set_params(params)
   #
-  #   if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
+  #   if params.version and compare_versions(format_stack_version(params.version), '2.3.0.0') >= 0:
   #     conf_select.select(params.stack_name, "atlas", params.version)
-  #     hdp_select.select("atlas-client", params.version)
+  #     stack_select.select("atlas-client", params.version)
 
   def install(self, env):
     self.install_packages(env)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
index 9ba519f..5a39278 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/metadata_server.py
@@ -18,10 +18,10 @@ limitations under the License.
 """
 from metadata import metadata
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management import Execute, check_process_status, Script
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_PROPERTIES
@@ -43,9 +43,9 @@ class MetadataServer(Script):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.3.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.3.0.0') >= 0:
       # conf_select.select(params.stack_name, "atlas", params.version)
-      hdp_select.select("atlas-server", params.version)
+      stack_select.select("atlas-server", params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
index eb2d816..38c2c9b 100644
--- a/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/ATLAS/0.1.0.2.3/package/scripts/params.py
@@ -19,7 +19,7 @@ limitations under the License.
 """
 import os
 import sys
-from resource_management import format_hdp_stack_version, Script
+from resource_management import format_stack_version, Script
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.default import default
 
@@ -46,7 +46,7 @@ version = default("/commandParams/version", None)
 
 # hdp version
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 metadata_home = os.environ['METADATA_HOME_DIR'] if 'METADATA_HOME_DIR' in os.environ else '/usr/hdp/current/atlas-server'
 metadata_bin = format("{metadata_home}/bin")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
index ef65ecb..2894844 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py
@@ -19,7 +19,7 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from falcon import falcon
 from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
@@ -49,12 +49,12 @@ class FalconClientLinux(FalconClient):
 
     # this function should not execute if the version can't be determined or
     # is not at least HDP 2.2.0.0
-    if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
+    if not params.version or compare_versions(format_stack_version(params.version), '2.2.0.0') < 0:
       return
 
     Logger.info("Executing Falcon Client Stack Upgrade pre-restart")
     conf_select.select(params.stack_name, "falcon", params.version)
-    hdp_select.select("falcon-client", params.version)
+    stack_select.select("falcon-client", params.version)
 
   def security_status(self, env):
     import status_params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
index 13401dc..ccc1c9d 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py
@@ -22,7 +22,7 @@ import falcon_server_upgrade
 from resource_management.core.logger import Logger
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import check_process_status
 from resource_management.libraries.functions.security_commons import build_expectations
 from resource_management.libraries.functions.security_commons import cached_kinit_executor
@@ -77,12 +77,12 @@ class FalconServerLinux(FalconServer):
 
     # this function should not execute if the version can't be determined or
     # is not at least HDP 2.2.0.0
-    if Script.is_hdp_stack_less_than("2.2"):
+    if Script.is_stack_less_than("2.2"):
       return
 
     Logger.info("Executing Falcon Server Stack Upgrade pre-restart")
     conf_select.select(params.stack_name, "falcon", params.version)
-    hdp_select.select("falcon-server", params.version)
+    stack_select.select("falcon-server", params.version)
     falcon_server_upgrade.pre_start_restore()
 
   def security_status(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
index 74ac010..707c4ed 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py
@@ -19,9 +19,9 @@ limitations under the License.
 import status_params
 
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
@@ -35,14 +35,14 @@ stack_name = default("/hostLevelParams/stack_name", None)
 version = default("/commandParams/version", None)
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 etc_prefix_dir = "/etc/falcon"
 
 # hadoop params
-hadoop_home_dir = hdp_select.get_hadoop_dir("home")
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_home_dir = stack_select.get_hadoop_dir("home")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
 
   # if this is a server action, then use the server binaries; smoke tests
   # use the client binaries

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/status_params.py
index 399ff22..2c06c40 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/status_params.py
@@ -46,7 +46,7 @@ else:
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 
   falcon_conf_dir = "/etc/falcon/conf"
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
+  if Script.is_stack_greater_or_equal("2.2"):
     falcon_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
 
   # Security related/required params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py
index 937547c..60138bb 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_handler.py
@@ -24,7 +24,7 @@ from flume import get_desired_state
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.flume_agent_helper import find_expected_agent_names
 from resource_management.libraries.functions.flume_agent_helper import get_flume_status
 
@@ -89,12 +89,12 @@ class FlumeHandlerLinux(FlumeHandler):
 
     # this function should not execute if the version can't be determined or
     # is not at least HDP 2.2.0.0
-    if not params.version or Script.is_hdp_stack_less_than("2.2"):
+    if not params.version or Script.is_stack_less_than("2.2"):
       return
 
     Logger.info("Executing Flume Stack Upgrade pre-restart")
     conf_select.select(params.stack_name, "flume", params.version)
-    hdp_select.select("flume-server", params.version)
+    stack_select.select("flume-server", params.version)
 
     # only restore on upgrade, not downgrade
     if params.upgrade_direction == Direction.UPGRADE:

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
index 5ec879c..29f71a7 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params.py
@@ -19,7 +19,7 @@ limitations under the License.
 from ambari_commons import OSCheck
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.script.script import Script
 
@@ -42,7 +42,7 @@ proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 security_enabled = False
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 # hadoop default parameters
 flume_bin = '/usr/bin/flume-ng'
@@ -50,7 +50,7 @@ flume_hive_home = '/usr/lib/hive'
 flume_hcat_home = '/usr/lib/hive-hcatalog'
 
 # hadoop parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   flume_bin = '/usr/hdp/current/flume-server/bin/flume-ng'
   flume_hive_home = '/usr/hdp/current/hive-metastore'
   flume_hcat_home = '/usr/hdp/current/hive-webhcat'

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_linux.py
index 22f3324..1067ba7 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_linux.py
@@ -29,7 +29,7 @@ upgrade_direction = default("/commandParams/upgrade_direction", Direction.UPGRAD
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 
 flume_conf_dir = '/etc/flume/conf'
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   flume_conf_dir = '/usr/hdp/current/flume-server/conf'
 
 flume_user = 'flume'

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqmaster.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqmaster.py b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqmaster.py
index 8c42848..2c3493a 100644
--- a/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqmaster.py
+++ b/ambari-server/src/main/resources/common-services/HAWQ/2.0.0/package/scripts/hawqmaster.py
@@ -21,7 +21,7 @@ from resource_management.core.resources.system import Execute
 from resource_management.core.logger import Logger
 from resource_management.libraries.functions.check_process_status import check_process_status
 try:
-    from resource_management.libraries.functions import hdp_select as hadoop_select
+    from resource_management.libraries.functions import stack_select as hadoop_select
 except ImportError:
     from resource_management.libraries.functions import phd_select as hadoop_select
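
Whichever import above succeeds is bound to the same local alias, so callers
below work on both HDP and PHD stacks; e.g. a call like the following
(illustrative) resolves against whichever module was found:

  hadoop_prefix = hadoop_select.get_hadoop_dir("home")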
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
index ac34d40..c31bbf6 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
@@ -21,7 +21,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from hbase import hbase
 from ambari_commons import OSCheck, OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
@@ -57,13 +57,13 @@ class HbaseClientDefault(HbaseClient):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hbase", params.version)
-      hdp_select.select("hbase-client", params.version)
+      stack_select.select("hbase-client", params.version)
 
       # phoenix may not always be deployed
       try:
-        hdp_select.select("phoenix-client", params.version)
+        stack_select.select("phoenix-client", params.version)
       except Exception as e:
         print "Ignoring error due to missing phoenix-client"
         print str(e)
@@ -73,7 +73,7 @@ class HbaseClientDefault(HbaseClient):
       # of the final "CLIENTS" group and we need to ensure that hadoop-client
       # is also set
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-client", params.version)
+      stack_select.select("hadoop-client", params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index f9694c6..01503fe 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -28,9 +28,9 @@ from ambari_commons.str_utils import cbool, cint
 
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions import is_empty
@@ -51,10 +51,10 @@ component_directory = status_params.component_directory
 etc_prefix_dir = "/etc/hbase"
 
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 
 # hadoop default parameters
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
 region_mover = "/usr/lib/hbase/bin/region_mover.rb"
@@ -63,7 +63,7 @@ hbase_cmd = "/usr/lib/hbase/bin/hbase"
 hbase_max_direct_memory_size = None
 
 # hadoop parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   daemon_script = format('/usr/hdp/current/hbase-client/bin/hbase-daemon.sh')
   region_mover = format('/usr/hdp/current/hbase-client/bin/region_mover.rb')
   region_drainer = format('/usr/hdp/current/hbase-client/bin/draining_servers.rb')

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
index 693bb08..87e4899 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/phoenix_queryserver.py
@@ -18,7 +18,7 @@ limitations under the License.
 """
 
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.script import Script
 from phoenix_service import phoenix_service
 from hbase import hbase
@@ -59,10 +59,10 @@ class PhoenixQueryServer(Script):
     import params
     env.set_params(params)
 
-    if Script.is_hdp_stack_greater_or_equal("2.3"):
+    if Script.is_stack_greater_or_equal("2.3"):
       # phoenix uses hbase configs
       conf_select.select(params.stack_name, "hbase", params.version)
-      hdp_select.select("phoenix-server", params.version)
+      stack_select.select("phoenix-server", params.version)
 
 
   def status(self, env):

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
index 014e8d7..535c821 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/status_params.py
@@ -51,5 +51,5 @@ else:
 
   hbase_conf_dir = "/etc/hbase/conf"
   limits_conf_dir = "/etc/security/limits.d"
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
+  if Script.is_stack_greater_or_equal("2.2"):
     hbase_conf_dir = format("/usr/hdp/current/{component_directory}/conf")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
index 00040fa..c5ba682 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
@@ -22,16 +22,16 @@ from resource_management import *
 from resource_management.core.resources.system import Execute
 from resource_management.core import shell
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.decorator import retry
 
 def prestart(env, hdp_component):
   import params
 
-  if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+  if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
     conf_select.select(params.stack_name, "hbase", params.version)
-    hdp_select.select(hdp_component, params.version)
+    stack_select.select(hdp_component, params.version)
 
 def post_regionserver(env):
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
index 5adeab4..3cdfda9 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/datanode.py
@@ -20,8 +20,8 @@ import datanode_upgrade
 from hdfs_datanode import datanode
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, FILE_TYPE_XML
 from hdfs import hdfs
@@ -87,9 +87,9 @@ class DataNodeDefault(DataNode):
     Logger.info("Executing DataNode Stack Upgrade pre-restart")
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-hdfs-datanode", params.version)
+      stack_select.select("hadoop-hdfs-datanode", params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing DataNode Stack Upgrade post-restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
index 21c0eda..c5ae35e 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
@@ -19,7 +19,7 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -60,9 +60,9 @@ class HdfsClientDefault(HdfsClient):
   def pre_upgrade_restart(self, env, upgrade_type=None):
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-client", params.version)
+      stack_select.select("hadoop-client", params.version)
 
   def security_status(self, env):
     import status_params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
index 7715f6c..6f26b40 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/journalnode.py
@@ -19,9 +19,9 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.version import compare_versions, \
-  format_hdp_stack_version
+  format_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
@@ -50,9 +50,9 @@ class JournalNodeDefault(JournalNode):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-hdfs-journalnode", params.version)
+      stack_select.select("hadoop-hdfs-journalnode", params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
index 910bc0a..02905ec 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode.py
@@ -29,9 +29,9 @@ from resource_management import Script
 from resource_management.core.resources.system import Execute, File
 from resource_management.core import shell
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import Direction
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.format import format
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
@@ -190,14 +190,14 @@ class NameNodeDefault(NameNode):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       # When downgrading an Express Upgrade, the first thing we do is to revert the symlinks.
       # Therefore, we cannot call this code in that scenario.
       call_if = [("rolling", "upgrade"), ("rolling", "downgrade"), ("nonrolling", "upgrade")]
       for e in call_if:
         if (upgrade_type, params.upgrade_direction) == e:
           conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-hdfs-namenode", params.version)
+      stack_select.select("hadoop-hdfs-namenode", params.version)
 
   def post_upgrade_restart(self, env, upgrade_type=None):
     Logger.info("Executing Stack Upgrade post-restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
index 4b9ad06..c705fca 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/nfsgateway.py
@@ -25,8 +25,8 @@ from resource_management.libraries.functions.security_commons import build_expec
 from hdfs_nfsgateway import nfsgateway
 from hdfs import hdfs
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 
 
 class NFSGateway(Script):
@@ -45,9 +45,9 @@ class NFSGateway(Script):
     import params
     env.set_params(params)
 
-    if Script.is_hdp_stack_greater_or_equal('2.3.0.0'):
+    if Script.is_stack_greater_or_equal('2.3.0.0'):
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-hdfs-nfs3", params.version)
+      stack_select.select("hadoop-hdfs-nfs3", params.version)
 
   def start(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
index 5242694..f0bf4d2 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py
@@ -27,9 +27,9 @@ from ambari_commons.os_check import OSCheck
 from ambari_commons.str_utils import cbool, cint
 
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_klist_path
 from resource_management.libraries.functions import get_kinit_path
@@ -47,7 +47,7 @@ tmp_dir = Script.get_tmp_dir()
 stack_name = default("/hostLevelParams/stack_name", None)
 upgrade_direction = default("/commandParams/upgrade_direction", None)
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
-hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
+stack_version_formatted = format_stack_version(stack_version_unformatted)
 agent_stack_retry_on_unavailability = cbool(config["hostLevelParams"]["agent_stack_retry_on_unavailability"])
 agent_stack_retry_count = cint(config["hostLevelParams"]["agent_stack_retry_count"])
 
@@ -77,17 +77,17 @@ secure_dn_ports_are_in_use = False
 
 # hadoop default parameters
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
-hadoop_bin = hdp_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
-hadoop_home = hdp_select.get_hadoop_dir("home")
+hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
+hadoop_bin = stack_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_home = stack_select.get_hadoop_dir("home")
 hadoop_secure_dn_user = hdfs_user
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
+hadoop_lib_home = stack_select.get_hadoop_dir("lib")
 
 # hadoop parameters for 2.2+
-if Script.is_hdp_stack_greater_or_equal("2.2"):
+if Script.is_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
 
   if not security_enabled:
@@ -114,7 +114,7 @@ limits_conf_dir = "/etc/security/limits.d"
 hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
 hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
 
-create_lib_snappy_symlinks = not Script.is_hdp_stack_greater_or_equal("2.2")
+create_lib_snappy_symlinks = not Script.is_stack_greater_or_equal("2.2")
 jsvc_path = "/usr/lib/bigtop-utils"
 
 execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
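
These lookups run at module import, so every command on the host sees one consistent set of Hadoop directories. A sketch of what the renamed helpers resolve to, with paths illustrative of an HDP install (other stacks place the stack root elsewhere):

  from resource_management.libraries.functions import conf_select
  from resource_management.libraries.functions import stack_select

  hadoop_bin_dir  = stack_select.get_hadoop_dir("bin")   # e.g. /usr/hdp/current/hadoop-client/bin
  hadoop_home     = stack_select.get_hadoop_dir("home")  # e.g. /usr/hdp/current/hadoop-client
  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()    # e.g. /etc/hadoop/conf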

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
index b8a1726..f96ac01 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/snamenode.py
@@ -19,8 +19,8 @@ limitations under the License.
 
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -71,9 +71,9 @@ class SNameNodeDefault(SNameNode):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-hdfs-secondarynamenode", params.version)
+      stack_select.select("hadoop-hdfs-secondarynamenode", params.version)
 
   def security_status(self, env):
     import status_params

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
index e59dd78..c626028 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/utils.py
@@ -224,12 +224,12 @@ def service(action=None, name=None, user=None, options="", create_pid_dir=False,
     hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")
 
     # At Champlain stack and further, we may start datanode as a non-root even in secure cluster
-    if not (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) or params.secure_dn_ports_are_in_use:
+    if not (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0) or params.secure_dn_ports_are_in_use:
       user = "root"
       pid_file = format(
         "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
 
-    if action == 'stop' and (params.hdp_stack_version != "" and compare_versions(params.hdp_stack_version, '2.2') >= 0) and \
+    if action == 'stop' and (params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0) and \
       os.path.isfile(hadoop_secure_dn_pid_file):
         # We need special handling for this case to handle the situation
         # when we configure non-root secure DN and then restart it
@@ -354,7 +354,7 @@ def get_hdfs_binary(distro_component_name):
   if params.stack_name == "HDP":
     # This was used in HDP 2.1 and earlier
     hdfs_binary = "hdfs"
-    if Script.is_hdp_stack_greater_or_equal("2.2"):
+    if Script.is_stack_greater_or_equal("2.2"):
       hdfs_binary = "/usr/hdp/current/{0}/bin/hdfs".format(distro_component_name)
 
   return hdfs_binary
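
get_hdfs_binary() is the version fork in miniature: the bare command on HDP 2.1 and earlier, the versioned wrapper from 2.2 onward. A usage sketch, with return values illustrative:

  hdfs_binary = get_hdfs_binary("hadoop-hdfs-namenode")
  # HDP <  2.2 -> "hdfs"
  # HDP >= 2.2 -> "/usr/hdp/current/hadoop-hdfs-namenode/bin/hdfs"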

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
index 70bebb4..85e7012 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py
@@ -23,7 +23,7 @@ from ambari_commons import OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
 from resource_management.core.logger import Logger
 from resource_management.core.exceptions import ClientComponentHasNoStatus
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.script.script import Script
 
@@ -78,7 +78,7 @@ class HCatClientDefault(HCatClient):
     # HCat client doesn't have a first-class entry in hdp-select. Since clients always
     # update after daemons, this ensures that the hcat directories are correct on hosts
     # which do not include the WebHCat daemon
-    hdp_select.select("hive-webhcat", params.version)
+    stack_select.select("hive-webhcat", params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
index 92e4ad7..c5d45ee 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py
@@ -107,7 +107,7 @@ def hive(name=None):
 
   if name == 'hiveserver2':
     # HDP 2.1.* or lower
-    if params.hdp_stack_version_major != "" and compare_versions(params.hdp_stack_version_major, "2.2.0.0") < 0:
+    if params.stack_version_formatted_major != "" and compare_versions(params.stack_version_formatted_major, "2.2.0.0") < 0:
       params.HdfsResource(params.webhcat_apps_dir,
                             type="directory",
                             action="create_on_execute",
@@ -134,7 +134,7 @@ def hive(name=None):
     # ****** Begin Copy Tarballs ******
     # *********************************
     # HDP 2.2 or higher, copy mapreduce.tar.gz to HDFS
-    if params.hdp_stack_version_major != "" and compare_versions(params.hdp_stack_version_major, '2.2') >= 0:
+    if params.stack_version_formatted_major != "" and compare_versions(params.stack_version_formatted_major, '2.2') >= 0:
       copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
       copy_to_hdfs("tez", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
 
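copy_to_hdfs() is the other helper these Hive hunks lean on; per the call sites above it takes the tarball name plus ownership details. A sketch with the arguments as they appear in the hunk; the return-flag interpretation is an assumption based on how the hive_server hunk below consumes it:

  resource_created = copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user,
                                  host_sys_prepped=params.host_sys_prepped)
  # assumed semantics: truthy when the tarball actually had to be (re)uploaded;
  # the hive_server script keys follow-up HDFS work off this flag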

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
index e4aace3..ba2a129 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
@@ -20,7 +20,7 @@ limitations under the License.
 import sys
 from resource_management import *
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from hive import hive
 from ambari_commons.os_family_impl import OsFamilyImpl
 from ambari_commons import OSConst
@@ -55,10 +55,10 @@ class HiveClientDefault(HiveClient):
 
     import params
     env.set_params(params)
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hive", params.version)
       conf_select.select(params.stack_name, "hadoop", params.version)
-      hdp_select.select("hadoop-client", params.version)
+      stack_select.select("hadoop-client", params.version)
 
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
index 59ecbbb..7fee2b3 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_metastore.py
@@ -23,10 +23,10 @@ from resource_management.core.logger import Logger
 from resource_management.core.resources.system import Execute, Directory
 from resource_management.libraries.script import Script
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.constants import Direction
 from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.version import compare_versions
 from resource_management.libraries.functions.security_commons import build_expectations
 from resource_management.libraries.functions.security_commons import cached_kinit_executor
@@ -102,15 +102,15 @@ class HiveMetastoreDefault(HiveMetastore):
 
     env.set_params(params)
 
-    is_stack_hdp_23 = Script.is_hdp_stack_greater_or_equal("2.3")
+    is_stack_hdp_23 = Script.is_stack_greater_or_equal("2.3")
     is_upgrade = params.upgrade_direction == Direction.UPGRADE
 
     if is_stack_hdp_23 and is_upgrade:
       self.upgrade_schema(env)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hive", params.version)
-      hdp_select.select("hive-metastore", params.version)
+      stack_select.select("hive-metastore", params.version)
 
 
   def security_status(self, env):
@@ -229,7 +229,7 @@ class HiveMetastoreDefault(HiveMetastore):
     # we need to choose the original legacy location
     schematool_hive_server_conf_dir = params.hive_server_conf_dir
     if params.current_version is not None:
-      current_version = format_hdp_stack_version(params.current_version)
+      current_version = format_stack_version(params.current_version)
       if compare_versions(current_version, "2.3") < 0:
         schematool_hive_server_conf_dir = LEGACY_HIVE_SERVER_CONF
 
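compare_versions(), used for the 2.3 cutoff above and throughout these hunks, should be read as a numeric, segment-wise comparison rather than a string compare. A small hedged illustration, mirroring how the hunks consume it:

  from resource_management.libraries.functions.version import compare_versions
  print(compare_versions("2.3.0.0", "2.2.0.0") >= 0)  # True:  at or above the cutoff
  print(compare_versions("2.1.7.0", "2.2.0.0") >= 0)  # False: below the cutoff
  print(compare_versions("2.2.4.2", "2.3") < 0)       # True:  schematool falls back to the legacy conf dir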

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
index a81e4f6..f7f1377 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server.py
@@ -22,12 +22,12 @@ limitations under the License.
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
 from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
@@ -117,9 +117,9 @@ class HiveServerDefault(HiveServer):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
       conf_select.select(params.stack_name, "hive", params.version)
-      hdp_select.select("hive-server2", params.version)
+      stack_select.select("hive-server2", params.version)
 
       # Copy mapreduce.tar.gz and tez.tar.gz to HDFS
       resource_created = copy_to_hdfs(

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
index 6fa3081..3a90164 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_interactive.py
@@ -22,12 +22,12 @@ limitations under the License.
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.functions.get_stack_version import get_stack_version
 from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.version import compare_versions, format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.security_commons import build_expectations, \
     cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
     FILE_TYPE_XML


[18/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
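
The rename in this commit is mechanical; as the hunks in this message show, the substitutions are:

  hdp_select                            -> stack_select
  format_hdp_stack_version              -> format_stack_version
  get_hdp_version                       -> get_stack_version
  Script.is_hdp_stack_greater_or_equal  -> Script.is_stack_greater_or_equal
  params.hdp_stack_version              -> params.stack_version_formatted
  params.hdp_stack_version_major        -> params.stack_version_formatted_major
  executeScript(hdp_stack_version=...)  -> executeScript(stack_version=...)
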
http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 0683551..7db4b26 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -45,7 +45,7 @@ class TestOozieServer(RMFTestCase):
                        classname = "OozieServer",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = call_mocks
     )
@@ -59,7 +59,7 @@ class TestOozieServer(RMFTestCase):
                        classname = "OozieServer",
                        command = "configure",
                        config_file="default_oozie_mysql.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = call_mocks
     )
@@ -274,7 +274,7 @@ class TestOozieServer(RMFTestCase):
                        classname = "OozieServer",
                        command = "configure",
                        config_file="oozie_existing_sqla.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = call_mocks
     )
@@ -496,7 +496,7 @@ class TestOozieServer(RMFTestCase):
                          classname = "OozieServer",
                          command = "start",
                          config_file="default.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = call_mocks
         )
@@ -551,7 +551,7 @@ class TestOozieServer(RMFTestCase):
                          classname = "OozieServer",
                          command = "stop",
                          config_file="default.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-stop.sh',
@@ -571,7 +571,7 @@ class TestOozieServer(RMFTestCase):
                        classname = "OozieServer",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = call_mocks
     )
@@ -587,7 +587,7 @@ class TestOozieServer(RMFTestCase):
                          classname = "OozieServer",
                          command = "start",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = call_mocks
     )
@@ -646,7 +646,7 @@ class TestOozieServer(RMFTestCase):
                          classname = "OozieServer",
                          command = "stop",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-stop.sh',
@@ -1047,7 +1047,7 @@ class TestOozieServer(RMFTestCase):
                      classname = "OozieServer",
                      command = "configure",
                      config_file="default.json",
-                     hdp_stack_version = self.STACK_VERSION,
+                     stack_version = self.STACK_VERSION,
                      target = RMFTestCase.TARGET_COMMON_SERVICES,
                      call_mocks = call_mocks
     )
@@ -1090,7 +1090,7 @@ class TestOozieServer(RMFTestCase):
                        classname = "OozieServer",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -1114,7 +1114,7 @@ class TestOozieServer(RMFTestCase):
                          classname = "OozieServer",
                          command = "security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -1131,7 +1131,7 @@ class TestOozieServer(RMFTestCase):
                        classname = "OozieServer",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -1150,7 +1150,7 @@ class TestOozieServer(RMFTestCase):
                        classname = "OozieServer",
                        command = "security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -1160,7 +1160,7 @@ class TestOozieServer(RMFTestCase):
                        classname = "OozieServer",
                        command = "security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -1192,7 +1192,7 @@ class TestOozieServer(RMFTestCase):
 
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
      classname = "OozieServer", command = "pre_upgrade_restart", config_file = "oozie-upgrade.json",
-     hdp_stack_version = self.UPGRADE_STACK_VERSION,
+     stack_version = self.UPGRADE_STACK_VERSION,
      target = RMFTestCase.TARGET_COMMON_SERVICES,
      call_mocks = [(0, prepare_war_stdout)])
     
@@ -1263,7 +1263,7 @@ class TestOozieServer(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
      classname = "OozieServer", command = "pre_upgrade_restart", config_dict = json_content,
-     hdp_stack_version = self.UPGRADE_STACK_VERSION,
+     stack_version = self.UPGRADE_STACK_VERSION,
      target = RMFTestCase.TARGET_COMMON_SERVICES,
      call_mocks = [(0, None, ''), (0, prepare_war_stdout)],
      mocks_dict = mocks_dict)
@@ -1334,7 +1334,7 @@ class TestOozieServer(RMFTestCase):
 
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
      classname = "OozieServer", command = "pre_upgrade_restart", config_file = "oozie-downgrade.json",
-     hdp_stack_version = self.UPGRADE_STACK_VERSION,
+     stack_version = self.UPGRADE_STACK_VERSION,
      target = RMFTestCase.TARGET_COMMON_SERVICES,
      call_mocks = [(0, prepare_war_stdout)])
 
@@ -1385,7 +1385,7 @@ class TestOozieServer(RMFTestCase):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server_upgrade.py",
       classname = "OozieUpgrade", command = "upgrade_oozie_database_and_sharelib",
       config_dict = json_content,
-      hdp_stack_version = self.UPGRADE_STACK_VERSION,
+      stack_version = self.UPGRADE_STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES )
 
     self.assertResourceCalled('Execute', '/usr/hdp/2.3.0.0-1234/oozie/bin/ooziedb.sh upgrade -run',
@@ -1448,7 +1448,7 @@ class TestOozieServer(RMFTestCase):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server_upgrade.py",
       classname = "OozieUpgrade", command = "upgrade_oozie_database_and_sharelib",
       config_dict = json_content,
-      hdp_stack_version = self.UPGRADE_STACK_VERSION,
+      stack_version = self.UPGRADE_STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES )
 
     self.assertResourceCalled('File', '/tmp/mysql-connector-java.jar',
@@ -1534,7 +1534,7 @@ class TestOozieServer(RMFTestCase):
     mocks_dict = {}
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/oozie_server.py",
      classname = "OozieServer", command = "pre_upgrade_restart", config_dict = json_content,
-     hdp_stack_version = self.UPGRADE_STACK_VERSION,
+     stack_version = self.UPGRADE_STACK_VERSION,
      target = RMFTestCase.TARGET_COMMON_SERVICES,
      call_mocks = [(0, None, ''), (0, prepare_war_stdout)],
      mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_service_check.py
index 8bf2790..646f5db 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_service_check.py
@@ -44,7 +44,7 @@ class TestOozieServiceCheck(RMFTestCase):
                        classname = "OozieServiceCheck",
                        command = "service_check",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
index dc35612..462c361 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_service_check.py
@@ -32,7 +32,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="OozieServiceCheck",
                         command="service_check",
                         config_file="default.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -53,7 +53,7 @@ class TestServiceCheck(RMFTestCase):
                         classname="OozieServiceCheck",
                         command="service_check",
                         config_file="default.json",
-                        hdp_stack_version = self.STACK_VERSION,
+                        stack_version = self.STACK_VERSION,
                         target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
index 99b795e..804abe7 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_client.py
@@ -30,7 +30,7 @@ class TestPigClient(RMFTestCase):
                        classname = "PigClient",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -65,7 +65,7 @@ class TestPigClient(RMFTestCase):
                        classname = "PigClient",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     
@@ -105,7 +105,7 @@ class TestPigClient(RMFTestCase):
                        classname = "PigClient",
                        command = "configure",
                        config_dict=default_json,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -143,7 +143,7 @@ class TestPigClient(RMFTestCase):
                        classname = "PigClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
@@ -161,7 +161,7 @@ class TestPigClient(RMFTestCase):
                        classname = "PigClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, '')],
                        mocks_dict = mocks_dict)
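
On the test side the rename is carried entirely by the executeScript keyword; the harness is otherwise untouched. A representative call after this commit, with names taken from the hunk above:

  self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/pig_client.py",
                     classname = "PigClient",
                     command = "configure",
                     config_file = "default.json",
                     stack_version = self.STACK_VERSION,  # was: hdp_stack_version
                     target = RMFTestCase.TARGET_COMMON_SERVICES)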

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
index 9c0b035..1187e97 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
@@ -29,7 +29,7 @@ class TestPigServiceCheck(RMFTestCase):
                        classname = "PigServiceCheck",
                        command = "service_check",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/pigsmoke.out',
@@ -102,7 +102,7 @@ class TestPigServiceCheck(RMFTestCase):
                        classname = "PigServiceCheck",
                        command = "service_check",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py
index 7a13ede..4337f35 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_service_check.py
@@ -30,7 +30,7 @@ class TestSqoopServiceCheck(RMFTestCase):
                        classname = "SqoopServiceCheck",
                        command = "service_check",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     kinit_path_local = get_kinit_path()
@@ -48,7 +48,7 @@ class TestSqoopServiceCheck(RMFTestCase):
                        classname = "SqoopServiceCheck",
                        command = "service_check",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'sqoop version',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
index e12db4d..040dacb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/SQOOP/test_sqoop.py
@@ -30,7 +30,7 @@ class TestSqoop(RMFTestCase):
                        classname = "SqoopClient",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
@@ -76,7 +76,7 @@ class TestSqoop(RMFTestCase):
                        classname = "SqoopClient",
                        command = "configure",
                        config_dict = loaded_json,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Link', '/usr/lib/sqoop/lib/mysql-connector-java.jar',
@@ -136,7 +136,7 @@ class TestSqoop(RMFTestCase):
                        classname = "SqoopClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 69e1f5d..85959aa 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -39,7 +39,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -50,7 +50,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -128,7 +128,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -144,7 +144,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -155,7 +155,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -183,7 +183,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -718,7 +718,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -750,7 +750,7 @@ class TestHistoryServer(RMFTestCase):
                          classname="HistoryServer",
                          command="security_status",
                          config_file="secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -767,7 +767,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal not set."})
@@ -784,7 +784,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -794,7 +794,7 @@ class TestHistoryServer(RMFTestCase):
                        classname="HistoryServer",
                        command="security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -808,8 +808,8 @@ class TestHistoryServer(RMFTestCase):
                               action = ["delete"])
     self.assertResourceCalled("Link", "/etc/hadoop/conf", to="/usr/hdp/current/hadoop-client/conf")
 
-  @patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value="2.3.0"))
-  @patch.object(functions, "get_hdp_version", new = MagicMock(return_value="2.3.0.0-1234"))
+  @patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value="2.3.0"))
+  @patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.3.0.0-1234"))
   @patch("resource_management.libraries.functions.copy_tarball.copy_to_hdfs")
   def test_pre_upgrade_restart_23(self, copy_to_hdfs_mock):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
@@ -824,7 +824,7 @@ class TestHistoryServer(RMFTestCase):
                        classname = "HistoryServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None, None), (0, None, None), (0, None, None), (0, None, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
index a66d829..efe6038 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
@@ -29,7 +29,7 @@ origin_exists = os.path.exists
 @patch.object(os.path, "exists", new=MagicMock(
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
-@patch.object(functions, "get_hdp_version", new = MagicMock(return_value="2.2.0.0-1234"))
+@patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.2.0.0-1234"))
 class TestMapReduce2Client(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
@@ -39,7 +39,7 @@ class TestMapReduce2Client(RMFTestCase):
                        classname = "MapReduce2Client",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -207,7 +207,7 @@ class TestMapReduce2Client(RMFTestCase):
                        classname = "MapReduce2Client",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
@@ -375,13 +375,13 @@ class TestMapReduce2Client(RMFTestCase):
                               )
     self.assertNoMoreResources()
 
-  @patch.object(functions, "get_hdp_version", new=MagicMock(return_value="2.2.0.0-2041"))
+  @patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.2.0.0-2041"))
   def test_upgrade(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/mapreduce2_client.py",
                    classname = "MapReduce2Client",
                    command = "restart",
                    config_file="client-upgrade.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -401,7 +401,7 @@ class TestMapReduce2Client(RMFTestCase):
                        classname = "MapReduce2Client",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
index 3618ab8..82ad240 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_service_check.py
@@ -36,7 +36,7 @@ class TestServiceCheck(RMFTestCase):
                       classname="MapReduce2ServiceCheck",
                       command="service_check",
                       config_file="default.json",
-                      hdp_stack_version = self.STACK_VERSION,
+                      stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mapredsmokeoutput',
@@ -93,7 +93,7 @@ class TestServiceCheck(RMFTestCase):
                       classname="MapReduce2ServiceCheck",
                       command="service_check",
                       config_file="secured.json",
-                      hdp_stack_version = self.STACK_VERSION,
+                      stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('HdfsResource', '/user/ambari-qa/mapredsmokeoutput',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index d9f0d2e..e1036c5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -39,7 +39,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -50,7 +50,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -77,7 +77,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop nodemanager',
@@ -90,7 +90,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -101,7 +101,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -129,7 +129,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', 'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop-yarn/sbin/yarn-daemon.sh --config /etc/hadoop/conf stop nodemanager',
@@ -523,7 +523,7 @@ class TestNodeManager(RMFTestCase):
 
   @patch("socket.gethostbyname")
   @patch('time.sleep')
-  @patch.object(resource_management.libraries.functions, "get_hdp_version", new = MagicMock(return_value='2.3.0.0-1234'))
+  @patch.object(resource_management.libraries.functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
   def test_post_upgrade_restart(self, time_mock, socket_gethostbyname_mock):
     process_output = """
       c6401.ambari.apache.org:45454  RUNNING  c6401.ambari.apache.org:8042  0
@@ -535,7 +535,7 @@ class TestNodeManager(RMFTestCase):
       classname = "Nodemanager",
       command = "post_upgrade_restart",
       config_file = "default.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, process_output)],
       mocks_dict = mocks_dict
@@ -563,7 +563,7 @@ class TestNodeManager(RMFTestCase):
                          classname="Nodemanager",
                          command = "post_upgrade_restart",
                          config_file="default.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = [(0, process_output)],
                          mocks_dict = mocks_dict,
@@ -586,7 +586,7 @@ class TestNodeManager(RMFTestCase):
                          classname="Nodemanager",
                          command = "post_upgrade_restart",
                          config_file="default.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES,
                          call_mocks = [(999, process_output)],
                          mocks_dict = mocks_dict,
@@ -630,7 +630,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -653,7 +653,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
           )
     except:
@@ -670,7 +670,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -689,7 +689,7 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -699,13 +699,13 @@ class TestNodeManager(RMFTestCase):
                        classname="Nodemanager",
                        command="security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
 
   
-  @patch.object(resource_management.libraries.functions, "get_hdp_version", new = MagicMock(return_value='2.3.0.0-1234'))
+  @patch.object(resource_management.libraries.functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
@@ -718,7 +718,7 @@ class TestNodeManager(RMFTestCase):
                        classname = "Nodemanager",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
index ee6cd3a..e372667 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_resourcemanager.py
@@ -31,8 +31,8 @@ origin_exists = os.path.exists
   side_effect=lambda *args: origin_exists(args[0])
   if args[0][-2:] == "j2" else True))
 
-@patch.object(Script, "is_hdp_stack_greater_or_equal", new = MagicMock(return_value=False))
-@patch.object(functions, "get_hdp_version", new = MagicMock(return_value="2.0.0.0-1234"))
+@patch.object(Script, "is_stack_greater_or_equal", new = MagicMock(return_value=False))
+@patch.object(functions, "get_stack_version", new = MagicMock(return_value="2.0.0.0-1234"))
 class TestResourceManager(RMFTestCase):
   COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
   STACK_VERSION = "2.0.6"
@@ -42,7 +42,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -53,7 +53,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="start",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -79,7 +79,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="stop",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -93,7 +93,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -103,7 +103,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="start",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -130,7 +130,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="stop",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -144,7 +144,7 @@ class TestResourceManager(RMFTestCase):
                        classname = "Resourcemanager",
                        command = "decommission",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
@@ -163,7 +163,7 @@ class TestResourceManager(RMFTestCase):
                        classname = "Resourcemanager",
                        command = "decommission",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/etc/hadoop/conf/yarn.exclude',
@@ -545,7 +545,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -568,7 +568,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
           )
     except:
@@ -585,7 +585,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -604,7 +604,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -614,7 +614,7 @@ class TestResourceManager(RMFTestCase):
                        classname="Resourcemanager",
                        command="security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -631,7 +631,7 @@ class TestResourceManager(RMFTestCase):
                        classname = "Resourcemanager",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
index be075bc..4601092 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
@@ -39,7 +39,7 @@ class TestYarnClient(RMFTestCase):
                        classname = "YarnClient",
                        command = "configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -207,7 +207,7 @@ class TestYarnClient(RMFTestCase):
                        classname = "YarnClient",
                        command = "configure",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
@@ -381,7 +381,7 @@ class TestYarnClient(RMFTestCase):
                        command = "restart",
                        config_file="default.json",
                        config_overrides = { 'roleParams' : { "component_category": "CLIENT" } },
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -544,13 +544,13 @@ class TestYarnClient(RMFTestCase):
     self.assertNoMoreResources()
 
 
-  @patch.object(functions, "get_hdp_version", new=MagicMock(return_value="2.2.0.0-2041"))
+  @patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.2.0.0-2041"))
   def test_upgrade(self):
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                    classname = "YarnClient",
                    command = "restart",
                    config_file="client-upgrade.json",
-                   hdp_stack_version = self.STACK_VERSION,
+                   stack_version = self.STACK_VERSION,
                    target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -558,7 +558,7 @@ class TestYarnClient(RMFTestCase):
 
     # for now, it's enough that hdp-select is confirmed
 
-  @patch.object(functions, "get_hdp_version", new = MagicMock(return_value='2.3.0.0-1234'))
+  @patch.object(functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
   def test_pre_upgrade_restart_23(self):
     config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
     with open(config_file, "r") as f:
@@ -571,7 +571,7 @@ class TestYarnClient(RMFTestCase):
                        classname = "YarnClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
index 6279aab..320092d 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
@@ -39,7 +39,7 @@ class TestServiceCheck(RMFTestCase):
                           classname="ServiceCheck",
                           command="service_check",
                           config_file="default.json",
-                          hdp_stack_version = self.STACK_VERSION,
+                          stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES,
                           checked_call_mocks = [(0, "some test text, appTrackingUrl=http:"
                                 "//c6402.ambari.apache.org:8088/proxy/application_1429885383763_0001/, some test text")]
@@ -57,7 +57,7 @@ class TestServiceCheck(RMFTestCase):
                           classname="ServiceCheck",
                           command="service_check",
                           config_file="secured.json",
-                          hdp_stack_version = self.STACK_VERSION,
+                          stack_version = self.STACK_VERSION,
                           target = RMFTestCase.TARGET_COMMON_SERVICES,
                           checked_call_mocks = [(0, "some test text, appTrackingUrl=http:"
                                "//c6402.ambari.apache.org:8088/proxy/application_1429885383763_0001/, some test text")]

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
index 468f48d..c9b7064 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_client.py
@@ -32,7 +32,7 @@ class TestZookeeperClient(RMFTestCase):
                        classname = "ZookeeperClient",
                        command = "configure",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -95,7 +95,7 @@ class TestZookeeperClient(RMFTestCase):
                        classname = "ZookeeperClient",
                        command = "configure",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -170,7 +170,7 @@ class TestZookeeperClient(RMFTestCase):
                        classname = "ZookeeperClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'zookeeper-client', version), sudo=True)
@@ -191,7 +191,7 @@ class TestZookeeperClient(RMFTestCase):
                        classname = "ZookeeperClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
index 260cf33..1c20915 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_server.py
@@ -33,7 +33,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "configure",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -44,7 +44,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "start",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -60,7 +60,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "stop",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -75,7 +75,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "configure",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -86,7 +86,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "start",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -104,7 +104,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "stop",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -271,7 +271,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "security_status",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -294,7 +294,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "security_status",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -311,7 +311,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "security_status",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -330,7 +330,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "security_status",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -340,7 +340,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "security_status",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -356,7 +356,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'zookeeper-server', version), sudo=True)
@@ -377,7 +377,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)
@@ -412,7 +412,7 @@ class TestZookeeperServer(RMFTestCase):
                        classname = "ZookeeperServer",
                        command = "post_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [
                          (0, 'Created'),

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
index bb6570a..863505b 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/ZOOKEEPER/test_zookeeper_service_check.py
@@ -29,7 +29,7 @@ class TestServiceCheck(RMFTestCase):
                        classname = "ZookeeperServiceCheck",
                        command = "service_check",
                        config_file = "default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/zkSmoke.out',
@@ -52,7 +52,7 @@ class TestServiceCheck(RMFTestCase):
                        classname = "ZookeeperServiceCheck",
                        command = "service_check",
                        config_file = "secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/zkSmoke.out',
@@ -75,7 +75,7 @@ class TestServiceCheck(RMFTestCase):
                        classname = "ZookeeperServiceCheck",
                        command = "service_check",
                        config_file = "zk-service_check_2.2.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('File', '/tmp/zkSmoke.out',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
index 0498b3a..daee726 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
@@ -519,7 +519,7 @@ class TestHookAfterInstall(RMFTestCase):
   @patch("resource_management.libraries.functions.conf_select.select")
   @patch("os.symlink")
   @patch("shutil.rmtree")
-  def test_hook_default_hdp_select_specific_version(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock):
+  def test_hook_default_stack_select_specific_version(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock):
     """
     Tests that hdp-select set all on a specific version, not a 2.3* wildcard is used when
     installing a component when the cluster version is already set.

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
index 4d900eb..fbf624a 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_client.py
@@ -32,7 +32,7 @@ class TestFalconClient(RMFTestCase):
                        classname="FalconClient",
                        command="configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Directory', '/var/run/falcon',
@@ -92,7 +92,7 @@ class TestFalconClient(RMFTestCase):
                        classname="FalconClient",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -103,7 +103,7 @@ class TestFalconClient(RMFTestCase):
                        classname="FalconClient",
                        command="security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -119,7 +119,7 @@ class TestFalconClient(RMFTestCase):
                        classname = "FalconClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-client', version), sudo=True,)
@@ -138,7 +138,7 @@ class TestFalconClient(RMFTestCase):
                        classname = "FalconClient",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
index 10c721f..f6efb8c 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
@@ -37,7 +37,7 @@ class TestFalconServer(RMFTestCase):
       classname="FalconServer",
       command="start",
       config_file="default.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assert_configure_default()
@@ -54,7 +54,7 @@ class TestFalconServer(RMFTestCase):
       classname="FalconServer",
       command="stop",
       config_file="default.json",
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES)
 
     self.assertResourceCalled('Execute', '/usr/lib/falcon/bin/falcon-stop',
@@ -72,7 +72,7 @@ class TestFalconServer(RMFTestCase):
                        classname="FalconServer",
                        command="configure",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -201,7 +201,7 @@ class TestFalconServer(RMFTestCase):
 
     self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/falcon_server.py",
      classname = "FalconServer", command = "restart", config_file = "falcon-upgrade.json",
-     hdp_stack_version = self.UPGRADE_STACK_VERSION,
+     stack_version = self.UPGRADE_STACK_VERSION,
      target = RMFTestCase.TARGET_COMMON_SERVICES )
 
     self.assertResourceCalled('Execute',
@@ -421,7 +421,7 @@ class TestFalconServer(RMFTestCase):
                        classname="FalconServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -445,7 +445,7 @@ class TestFalconServer(RMFTestCase):
                        classname="FalconServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -462,7 +462,7 @@ class TestFalconServer(RMFTestCase):
                        classname="FalconServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -481,7 +481,7 @@ class TestFalconServer(RMFTestCase):
                        classname="FalconServer",
                        command="security_status",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -491,7 +491,7 @@ class TestFalconServer(RMFTestCase):
                        classname="FalconServer",
                        command="security_status",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -509,7 +509,7 @@ class TestFalconServer(RMFTestCase):
                        classname = "FalconServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'falcon-server', version), sudo=True,)
@@ -554,7 +554,7 @@ class TestFalconServer(RMFTestCase):
                        classname = "FalconServer",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/FALCON/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_service_check.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_service_check.py
index bb0ce90..6519e3c 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_service_check.py
@@ -30,7 +30,7 @@ class TestFalconServiceCheck(RMFTestCase):
                        classname="FalconServiceCheck",
                        command="service_check",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute', '/usr/lib/falcon/bin/falcon admin -version',
@@ -44,7 +44,7 @@ class TestFalconServiceCheck(RMFTestCase):
                        classname="FalconServiceCheck",
                        command="service_check",
                        config_file="secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assertResourceCalled('Execute','/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa@EXAMPLE.COM',

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index 425d060..80e4e36 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -37,7 +37,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "configure",
                        config_file="../../2.1/configs/default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_default()
@@ -47,7 +47,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "start",
                        config_file="../../2.1/configs/default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -77,7 +77,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "start",
                        config_file="../../2.1/configs/default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -104,7 +104,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "stop",
                        config_file="../../2.1/configs/default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -128,7 +128,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "configure",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     self.assert_configure_secured()
@@ -139,7 +139,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "start",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -164,7 +164,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "stop",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -413,7 +413,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 
@@ -437,7 +437,7 @@ class TestHiveMetastore(RMFTestCase):
                          classname = "HiveMetastore",
                          command = "security_status",
                          config_file="../../2.1/configs/secured.json",
-                         hdp_stack_version = self.STACK_VERSION,
+                         stack_version = self.STACK_VERSION,
                          target = RMFTestCase.TARGET_COMMON_SERVICES
       )
     except:
@@ -454,7 +454,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityIssuesFound": "Keytab file or principal are not set property."})
@@ -473,7 +473,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "security_status",
                        config_file="../../2.1/configs/secured.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -483,7 +483,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "security_status",
                        config_file="../../2.1/configs/default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
     put_structured_out_mock.assert_called_with({"securityState": "UNSECURED"})
@@ -498,7 +498,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES)
     self.assertResourceCalled('Execute',
                               ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-metastore', version), sudo=True,)
@@ -519,7 +519,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)
@@ -551,7 +551,7 @@ class TestHiveMetastore(RMFTestCase):
       classname = "HiveMetastore",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, ''), (0, None)])
 
@@ -571,7 +571,7 @@ class TestHiveMetastore(RMFTestCase):
       classname = "HiveMetastore",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, ''), (0, None)])
 
@@ -589,7 +589,7 @@ class TestHiveMetastore(RMFTestCase):
       classname = "HiveMetastore",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, ''), (0, None)])
 
@@ -598,9 +598,9 @@ class TestHiveMetastore(RMFTestCase):
 
   @patch("os.path.exists")
   @patch("resource_management.core.shell.call")
-  @patch("resource_management.libraries.functions.get_hdp_version")
-  def test_upgrade_metastore_schema(self, get_hdp_version_mock, call_mock, os_path_exists_mock):
-    get_hdp_version_mock.return_value = '2.3.0.0-1234'
+  @patch("resource_management.libraries.functions.get_stack_version")
+  def test_upgrade_metastore_schema(self, get_stack_version_mock, call_mock, os_path_exists_mock):
+    get_stack_version_mock.return_value = '2.3.0.0-1234'
 
     def side_effect(path):
       if path == "/usr/hdp/2.2.7.0-1234/hive-server2/lib/mysql-connector-java.jar":
@@ -633,7 +633,7 @@ class TestHiveMetastore(RMFTestCase):
       classname = "HiveMetastore",
       command = "pre_upgrade_restart",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, ''), (0, None)],
       mocks_dict = mocks_dict)
@@ -688,9 +688,9 @@ class TestHiveMetastore(RMFTestCase):
 
   @patch("os.path.exists")
   @patch("resource_management.core.shell.call")
-  @patch("resource_management.libraries.functions.get_hdp_version")
-  def test_upgrade_metastore_schema_using_new_db(self, get_hdp_version_mock, call_mock, os_path_exists_mock):
-    get_hdp_version_mock.return_value = '2.3.0.0-1234'
+  @patch("resource_management.libraries.functions.get_stack_version")
+  def test_upgrade_metastore_schema_using_new_db(self, get_stack_version_mock, call_mock, os_path_exists_mock):
+    get_stack_version_mock.return_value = '2.3.0.0-1234'
 
     def side_effect(path):
       if path == "/usr/hdp/2.2.7.0-1234/hive-server2/lib/mysql-connector-java.jar":
@@ -713,7 +713,7 @@ class TestHiveMetastore(RMFTestCase):
       classname = "HiveMetastore",
       command = "upgrade_schema",
       config_dict = json_content,
-      hdp_stack_version = self.STACK_VERSION,
+      stack_version = self.STACK_VERSION,
       target = RMFTestCase.TARGET_COMMON_SERVICES,
       call_mocks = [(0, None, ''), (0, None)],
       mocks_dict = mocks_dict)
@@ -754,10 +754,10 @@ class TestHiveMetastore(RMFTestCase):
 
   @patch("os.path.exists")
   @patch("resource_management.core.shell.call")
-  @patch("resource_management.libraries.functions.get_hdp_version")
-  def test_upgrade_sqla_metastore_schema_with_jdbc_download(self, get_hdp_version_mock, call_mock, os_path_exists_mock):
+  @patch("resource_management.libraries.functions.get_stack_version")
+  def test_upgrade_sqla_metastore_schema_with_jdbc_download(self, get_stack_version_mock, call_mock, os_path_exists_mock):
 
-    get_hdp_version_mock.return_value = '2.3.0.0-1234'
+    get_stack_version_mock.return_value = '2.3.0.0-1234'
 
     def side_effect(path):
       if path == "/usr/hdp/2.2.7.0-1234/hive-server2/lib/mysql-connector-java.jar":
@@ -789,7 +789,7 @@ class TestHiveMetastore(RMFTestCase):
                        classname = "HiveMetastore",
                        command = "pre_upgrade_restart",
                        config_dict = json_content,
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES,
                        call_mocks = [(0, None, ''), (0, None)],
                        mocks_dict = mocks_dict)
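
The metastore schema-upgrade tests above stack three patches -- os.path.exists, shell.call, and the renamed get_stack_version -- and wire a side_effect so that only the expected JDBC driver path reports as present. A condensed sketch of that mock wiring (decorator targets, version string, and driver path are copied from the hunks above; the executeScript body is elided, and the surrounding class is a stand-in):

# Condensed sketch of the mocking arrangement used by the schema tests above.
import unittest
from mock.mock import patch

class TestHiveMetastoreSketch(unittest.TestCase):
  @patch("os.path.exists")
  @patch("resource_management.core.shell.call")
  @patch("resource_management.libraries.functions.get_stack_version")
  def test_upgrade_metastore_schema(self, get_stack_version_mock, call_mock, os_path_exists_mock):
    # bottom-most @patch binds to the first mock argument, as in the diff
    get_stack_version_mock.return_value = '2.3.0.0-1234'

    def side_effect(path):
      # only the bundled MySQL connector path is reported as present
      return path == "/usr/hdp/2.2.7.0-1234/hive-server2/lib/mysql-connector-java.jar"

    os_path_exists_mock.side_effect = side_effect
    # ... executeScript(..., stack_version = self.STACK_VERSION, ...) elided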

http://git-wip-us.apache.org/repos/asf/ambari/blob/f7221e5a/ambari-server/src/test/python/stacks/2.1/STORM/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/STORM/test_service_check.py b/ambari-server/src/test/python/stacks/2.1/STORM/test_service_check.py
index 6f2080a..799494b 100644
--- a/ambari-server/src/test/python/stacks/2.1/STORM/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.1/STORM/test_service_check.py
@@ -33,7 +33,7 @@ class TestStormServiceCheck(TestStormBase):
                        classname="ServiceCheck",
                        command="service_check",
                        config_file="default.json",
-                       hdp_stack_version = self.STACK_VERSION,
+                       stack_version = self.STACK_VERSION,
                        target = RMFTestCase.TARGET_COMMON_SERVICES
     )
 


[31/51] [abbrv] ambari git commit: AMBARI-15336. Blueprints: NullPointerException when unnecessary config types are found with %HOSTGROUP% tags. (Sebastian Toader via magyari_sandor)

Posted by jl...@apache.org.
AMBARI-15336. Blueprints: NullPointerException when unnecessary config types are found with %HOSTGROUP% tags. (Sebastian Toader via magyari_sandor)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/664ccd18
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/664ccd18
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/664ccd18

Branch: refs/heads/AMBARI-13364
Commit: 664ccd185a3ba7ffb3489e49f527e7aa902e8cd0
Parents: 112d385
Author: Sandor Magyari <sm...@hortonworks.com>
Authored: Wed Mar 9 13:32:43 2016 +0100
Committer: Sandor Magyari <sm...@hortonworks.com>
Committed: Wed Mar 9 14:03:04 2016 +0100

----------------------------------------------------------------------
 .../topology/ClusterConfigurationRequest.java   | 41 +++++----
 .../ClusterConfigurationRequestTest.java        | 88 +++++++++++++++++---
 .../ClusterInstallWithoutStartTest.java         |  4 +-
 .../server/topology/TopologyManagerTest.java    |  4 +-
 4 files changed, 107 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/664ccd18/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
index c9120de..063fd12 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/topology/ClusterConfigurationRequest.java
@@ -75,7 +75,7 @@ public class ClusterConfigurationRequest {
     // set initial configuration (not topology resolved)
     this.configurationProcessor = new BlueprintConfigurationProcessor(clusterTopology);
     this.stackAdvisorBlueprintProcessor = stackAdvisorBlueprintProcessor;
-    removeOrphanConfigTypes(clusterTopology);
+    removeOrphanConfigTypes();
     if (setInitial) {
       setConfigurationsOnCluster(clusterTopology, TopologyManager.INITIAL_CONFIG_TAG, Collections.<String>emptySet());
     }
@@ -84,24 +84,35 @@ public class ClusterConfigurationRequest {
   /**
    * Remove config-types if no services are related to them (except cluster-env and global).
    */
-  private void removeOrphanConfigTypes(ClusterTopology clusterTopology) {
+  private void removeOrphanConfigTypes() {
     Configuration configuration = clusterTopology.getConfiguration();
+    removeOrphanConfigTypes(configuration);
+
+    Map<String, HostGroupInfo> hostGroupInfoMap = clusterTopology.getHostGroupInfo();
+    if (MapUtils.isNotEmpty(hostGroupInfoMap)) {
+      for (Map.Entry<String, HostGroupInfo> hostGroupInfo : hostGroupInfoMap.entrySet()) {
+        configuration = hostGroupInfo.getValue().getConfiguration();
+
+        if (configuration != null) {
+          removeOrphanConfigTypes(configuration);
+        }
+      }
+    }
+  }
+
+  /**
+   * Remove config-types from the given configuration if no services are related to them (except cluster-env and global).
+   */
+  private void removeOrphanConfigTypes(Configuration configuration) {
+    Blueprint blueprint = clusterTopology.getBlueprint();
+
     Collection<String> configTypes = configuration.getAllConfigTypes();
     for (String configType : configTypes) {
-      if (!configType.equals("cluster-env") && !configType.equals("global")) {
-        String service = clusterTopology.getBlueprint().getStack().getServiceForConfigType(configType);
-        if (!clusterTopology.getBlueprint().getServices().contains(service)) {
+      if (!"cluster-env".equals(configType) && !"global".equals(configType)) {
+        String service = blueprint.getStack().getServiceForConfigType(configType);
+        if (!blueprint.getServices().contains(service)) {
           configuration.removeConfigType(configType);
-          LOG.info("Not found any service for config type '{}'. It will be removed from configuration.", configType);
-          Map<String, HostGroupInfo> hostGroupInfoMap = clusterTopology.getHostGroupInfo();
-          if (MapUtils.isNotEmpty(hostGroupInfoMap)) {
-            for (Map.Entry<String, HostGroupInfo> hostGroupInfo : hostGroupInfoMap.entrySet()) {
-              if (hostGroupInfo.getValue().getConfiguration() != null) {
-                hostGroupInfo.getValue().getConfiguration().removeConfigType(configType);
-                LOG.info("Not found any service for config type '{}'. It will be removed from host group scoped configuration.", configType);
-              }
-            }
-          }
+          LOG.info("Removing config type '{}' as service '{}' is not present in either Blueprint or cluster creation template.", configType, service);
         }
       }
     }

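A minimal Python sketch of the pruning rule above (the actual implementation is the
Java removeOrphanConfigTypes in this diff; names here are illustrative): a config
type survives only if it is protected or its owning service is in the blueprint.

    PROTECTED = {"cluster-env", "global"}

    def remove_orphan_config_types(config_types, service_for_type, blueprint_services):
        kept = {}
        for config_type, props in config_types.items():
            # cluster-env and global are never treated as orphans.
            if config_type in PROTECTED or service_for_type.get(config_type) in blueprint_services:
                kept[config_type] = props
        return kept

    # YARN is absent from the blueprint, so yarn-site is dropped:
    print(remove_orphan_config_types(
        {"hdfs-site": {}, "yarn-site": {}, "cluster-env": {}},
        {"hdfs-site": "HDFS", "yarn-site": "YARN"},
        {"HDFS", "RANGER"}))
    # -> {'hdfs-site': {}, 'cluster-env': {}}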
http://git-wip-us.apache.org/repos/asf/ambari/blob/664ccd18/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
index ece1287..58919b9 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterConfigurationRequestTest.java
@@ -217,7 +217,7 @@ public class ClusterConfigurationRequestTest {
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getConfiguration()).andReturn(blueprintConfig).anyTimes();
-    expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap());
+    expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
     expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes();
     expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
     expect(ambariContext.createConfigurationRequests(anyObject(Map.class))).andReturn(Collections
@@ -295,7 +295,7 @@ public class ClusterConfigurationRequestTest {
     expect(topology.getConfigRecommendationStrategy()).andReturn(ConfigRecommendationStrategy.NEVER_APPLY).anyTimes();
     expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
     expect(topology.getConfiguration()).andReturn(stackConfig).anyTimes();
-    expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap());
+    expect(topology.getHostGroupInfo()).andReturn(Collections.<String, HostGroupInfo>emptyMap()).anyTimes();
     expect(topology.getClusterId()).andReturn(Long.valueOf(1)).anyTimes();
     expect(ambariContext.getClusterName(Long.valueOf(1))).andReturn("testCluster").anyTimes();
     expect(ambariContext.createConfigurationRequests(anyObject(Map.class))).andReturn(Collections
@@ -315,7 +315,7 @@ public class ClusterConfigurationRequestTest {
   }
 
   @Test
-  public void testProcessClusterConfigRequestRemoveUnusedConfigTypes() {
+  public void testProcessClusterConfigRequestRemoveUnusedConfigTypes() throws Exception {
     // GIVEN
     Configuration configuration = createConfigurations();
     Set<String> services = new HashSet<String>();
@@ -323,7 +323,7 @@ public class ClusterConfigurationRequestTest {
     services.add("RANGER");
     Map<String, HostGroupInfo> hostGroupInfoMap = Maps.newHashMap();
     HostGroupInfo hg1 = new HostGroupInfo("hg1");
-    hg1.setConfiguration(createConfigurations());
+    hg1.setConfiguration(createConfigurationsForHostGroup());
     hostGroupInfoMap.put("hg1", hg1);
 
     expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
@@ -339,15 +339,59 @@ public class ClusterConfigurationRequestTest {
     // WHEN
     new ClusterConfigurationRequest(ambariContext, topology, false, stackAdvisorBlueprintProcessor);
     // THEN
-    assertFalse(configuration.getFullProperties().containsKey("yarn-site"));
-    assertFalse(configuration.getFullAttributes().containsKey("yarn-site"));
-    assertTrue(configuration.getFullAttributes().containsKey("hdfs-site"));
-    assertTrue(configuration.getFullProperties().containsKey("cluster-env"));
-    assertTrue(configuration.getFullProperties().containsKey("global"));
-    assertFalse(hg1.getConfiguration().getFullAttributes().containsKey("yarn-site"));
+    assertFalse("YARN service not present in topology config thus 'yarn-site' config type should be removed from config.", configuration.getFullProperties().containsKey("yarn-site"));
+    assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", configuration.getFullAttributes().containsKey("hdfs-site"));
+    assertTrue("'cluster-env' config type should not be removed from configuration.", configuration.getFullProperties().containsKey("cluster-env"));
+    assertTrue("'global' config type should not be removed from configuration.", configuration.getFullProperties().containsKey("global"));
+
+    assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
+    assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
     verify(stack, blueprint, topology);
   }
 
+  @Test
+  public void testProcessClusterConfigRequestWithOnlyHostGroupConfigRemoveUnusedConfigTypes() throws Exception {
+    // Given
+    Map<String, Map<String, String>> config = Maps.newHashMap();
+    config.put("cluster-env", new HashMap<String, String>());
+    config.put("global", new HashMap<String, String>());
+    Map<String, Map<String, Map<String, String>>> attributes = Maps.newHashMap();
+
+    Configuration configuration = new Configuration(config, attributes);
+
+    Set<String> services = new HashSet<>();
+    services.add("HDFS");
+    services.add("RANGER");
+    Map<String, HostGroupInfo> hostGroupInfoMap = Maps.newHashMap();
+    HostGroupInfo hg1 = new HostGroupInfo("hg1");
+    hg1.setConfiguration(createConfigurationsForHostGroup());
+    hostGroupInfoMap.put("hg1", hg1);
+
+    expect(topology.getConfiguration()).andReturn(configuration).anyTimes();
+    expect(topology.getBlueprint()).andReturn(blueprint).anyTimes();
+    expect(topology.getHostGroupInfo()).andReturn(hostGroupInfoMap);
+    expect(blueprint.getStack()).andReturn(stack).anyTimes();
+    expect(blueprint.getServices()).andReturn(services).anyTimes();
+    expect(stack.getServiceForConfigType("hdfs-site")).andReturn("HDFS").anyTimes();
+    expect(stack.getServiceForConfigType("admin-properties")).andReturn("RANGER").anyTimes();
+    expect(stack.getServiceForConfigType("yarn-site")).andReturn("YARN").anyTimes();
+
+    EasyMock.replay(stack, blueprint, topology);
+
+    // When
+
+    new ClusterConfigurationRequest(ambariContext, topology, false, stackAdvisorBlueprintProcessor);
+
+    // Then
+    assertTrue("'cluster-env' config type should not be removed from configuration.", configuration.getFullProperties().containsKey("cluster-env"));
+    assertTrue("'global' config type should not be removed from configuration.", configuration.getFullProperties().containsKey("global"));
+
+    assertFalse("SPARK service not present in topology host group config thus 'spark-env' config type should be removed from config.", hg1.getConfiguration().getFullAttributes().containsKey("spark-env"));
+    assertTrue("HDFS service is present in topology host group config thus 'hdfs-site' config type should be left in the config.", hg1.getConfiguration().getFullAttributes().containsKey("hdfs-site"));
+    verify(stack, blueprint, topology);
+
+  }
+
   private Configuration createConfigurations() {
     Map<String, Map<String, String>> firstLevelConfig = Maps.newHashMap();
     firstLevelConfig.put("hdfs-site", new HashMap<String, String>());
@@ -362,9 +406,31 @@ public class ClusterConfigurationRequestTest {
     secondLevelConfig.put("admin-properties", new HashMap<String, String>());
     Map<String, Map<String, Map<String, String>>> secondLevelAttributes = Maps.newHashMap();
     secondLevelAttributes.put("admin-properties", new HashMap<String, Map<String, String>>());
-    secondLevelAttributes.put("yarn-site", new HashMap<String, Map<String, String>>());
+
 
     Configuration secondLevelConf = new Configuration(secondLevelConfig, secondLevelAttributes);
     return new Configuration(firstLevelConfig, firstLevelAttributes, secondLevelConf);
   }
+
+  private Configuration createConfigurationsForHostGroup() {
+    Map<String, Map<String, String>> firstLevelConfig = Maps.newHashMap();
+    firstLevelConfig.put("hdfs-site", new HashMap<String, String>());
+    firstLevelConfig.put("spark-env", new HashMap<String, String>());
+    firstLevelConfig.put("cluster-env", new HashMap<String, String>());
+    firstLevelConfig.put("global", new HashMap<String, String>());
+
+    Map<String, Map<String, Map<String, String>>> firstLevelAttributes = Maps.newHashMap();
+    firstLevelAttributes.put("hdfs-site", new HashMap<String, Map<String, String>>());
+
+    Map<String, Map<String, String>> secondLevelConfig = Maps.newHashMap();
+    secondLevelConfig.put("admin-properties", new HashMap<String, String>());
+    Map<String, Map<String, Map<String, String>>> secondLevelAttributes = Maps.newHashMap();
+    secondLevelAttributes.put("admin-properties", new HashMap<String, Map<String, String>>());
+
+
+    Configuration secondLevelConf = new Configuration(secondLevelConfig, secondLevelAttributes);
+    return new Configuration(firstLevelConfig, firstLevelAttributes, secondLevelConf);
+  }
+
+
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/664ccd18/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
index 156580a..6e7c975 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/ClusterInstallWithoutStartTest.java
@@ -251,8 +251,8 @@ public class ClusterInstallWithoutStartTest {
     expect(stack.getConfiguration()).andReturn(stackConfig).anyTimes();
     expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
     expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();
-    expect(stack.getServiceForConfigType("service1-site")).andReturn("service1");
-    expect(stack.getServiceForConfigType("service2-site")).andReturn("service2");
+    expect(stack.getServiceForConfigType("service1-site")).andReturn("service1").anyTimes();
+    expect(stack.getServiceForConfigType("service2-site")).andReturn("service2").anyTimes();
     expect(stack.getExcludedConfigurationTypes("service1")).andReturn(Collections.<String>emptySet()).anyTimes();
     expect(stack.getExcludedConfigurationTypes("service2")).andReturn(Collections.<String>emptySet()).anyTimes();
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/664ccd18/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
index 69c1935..91f4993 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/topology/TopologyManagerTest.java
@@ -228,8 +228,8 @@ public class TopologyManagerTest {
     expect(stack.getComponents()).andReturn(serviceComponents).anyTimes();
     expect(stack.getComponents("service1")).andReturn(serviceComponents.get("service1")).anyTimes();
     expect(stack.getComponents("service2")).andReturn(serviceComponents.get("service2")).anyTimes();
-    expect(stack.getServiceForConfigType("service1-site")).andReturn("service1");
-    expect(stack.getServiceForConfigType("service2-site")).andReturn("service2");
+    expect(stack.getServiceForConfigType("service1-site")).andReturn("service1").anyTimes();
+    expect(stack.getServiceForConfigType("service2-site")).andReturn("service2").anyTimes();
     expect(stack.getConfiguration()).andReturn(stackConfig).anyTimes();
     expect(stack.getName()).andReturn(STACK_NAME).anyTimes();
     expect(stack.getVersion()).andReturn(STACK_VERSION).anyTimes();


[37/51] [abbrv] ambari git commit: AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)

Posted by jl...@apache.org.
AMBARI-15329: Code Cleanup: Remove hdp hardcodings in functions, variables etc. (jluniya)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/456b4511
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/456b4511
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/456b4511

Branch: refs/heads/AMBARI-13364
Commit: 456b451180b427e89f52a5dc0a644e27fe346efb
Parents: f5bd058
Author: Jayush Luniya <jl...@hortonworks.com>
Authored: Wed Mar 9 10:09:45 2016 -0800
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 10:09:45 2016 -0800

----------------------------------------------------------------------
 .../src/main/python/amc_service.py              |  2 +-
 .../src/main/python/main.py                     |  4 +-
 .../0.5.0.2.1/package/scripts/params_windows.py |  2 +-
 .../0.5.0.2.1/package/scripts/service_check.py  |  2 +-
 .../1.4.0.2.0/package/scripts/flume_check.py    |  2 +-
 .../1.4.0.2.0/package/scripts/params_windows.py |  2 +-
 .../package/scripts/params_windows.py           |  2 +-
 .../0.96.0.2.0/package/scripts/service_check.py |  2 +-
 .../package/scripts/setup_ranger_hbase.py       |  6 +-
 .../HBASE/0.96.0.2.0/package/scripts/upgrade.py |  4 +-
 .../package/scripts/setup_ranger_hdfs.py        | 10 +--
 .../2.1.0.2.0/package/scripts/status_params.py  | 12 ++--
 .../package/scripts/hcat_service_check.py       |  2 +-
 .../package/scripts/hive_server_upgrade.py      |  6 +-
 .../package/scripts/params_windows.py           |  4 +-
 .../0.12.0.2.0/package/scripts/service_check.py |  2 +-
 .../package/scripts/setup_ranger_hive.py        |  6 +-
 .../package/scripts/webhcat_service_check.py    |  2 +-
 .../KAFKA/0.8.1.2.2/package/scripts/params.py   |  4 +-
 .../0.5.0.2.2/package/scripts/params_windows.py |  4 +-
 .../package/scripts/setup_ranger_knox.py        |  6 +-
 .../4.0.0.2.0/package/scripts/params_windows.py |  2 +-
 .../4.0.0.2.0/package/scripts/service_check.py  |  2 +-
 .../package/scripts/params_windows.py           |  4 +-
 .../0.12.0.2.0/package/scripts/service_check.py |  2 +-
 .../RANGER/0.4.0/package/scripts/upgrade.py     |  6 +-
 .../0.5.0.2.3/package/scripts/kms_server.py     |  4 +-
 .../0.5.0.2.3/package/scripts/upgrade.py        |  6 +-
 .../package/scripts/params_windows.py           |  4 +-
 .../0.60.0.2.2/package/scripts/service_check.py |  2 +-
 .../1.4.4.2.0/package/scripts/params_windows.py |  2 +-
 .../1.4.4.2.0/package/scripts/service_check.py  |  2 +-
 .../0.9.1.2.1/package/scripts/params_windows.py |  2 +-
 .../0.9.1.2.1/package/scripts/service_check.py  |  2 +-
 .../package/scripts/setup_ranger_storm.py       |  6 +-
 .../0.4.0.2.1/package/scripts/params_windows.py |  4 +-
 .../0.4.0.2.1/package/scripts/service_check.py  |  2 +-
 .../3.4.5.2.0/package/scripts/params_windows.py |  4 +-
 .../3.4.5.2.0/package/scripts/service_check.py  |  2 +-
 .../resources/host_scripts/alert_disk_space.py  | 20 +++---
 .../0.8/hooks/after-INSTALL/scripts/hook.py     |  2 +-
 .../0.8/hooks/after-INSTALL/scripts/params.py   |  2 +-
 .../scripts/shared_initialization.py            |  2 +-
 .../0.8/hooks/before-ANY/scripts/params.py      |  2 +-
 .../HDFS/package/scripts/status_params.py       | 12 ++--
 .../2.0.6/hooks/after-INSTALL/scripts/hook.py   |  4 +-
 .../2.0.6/hooks/after-INSTALL/scripts/params.py |  2 +-
 .../scripts/shared_initialization.py            |  2 +-
 .../2.0.6/hooks/before-ANY/scripts/params.py    |  2 +-
 .../2.1/hooks/after-INSTALL/scripts/params.py   |  4 +-
 .../src/test/python/TestVersionSelectUtil.py    |  8 +--
 .../custom_actions/TestInstallPackages.py       | 72 ++++++++++----------
 52 files changed, 139 insertions(+), 139 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-metrics/ambari-metrics-timelineservice/src/main/python/amc_service.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/python/amc_service.py b/ambari-metrics/ambari-metrics-timelineservice/src/main/python/amc_service.py
index f616fd0..a19fd11 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/python/amc_service.py
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/python/amc_service.py
@@ -165,7 +165,7 @@ def init_service_debug(options):
     sys.frozen = 'windows_exe'  # Fake py2exe so we can debug
 
 
-def ensure_hdp_service_soft_dependencies():
+def ensure_hadoop_service_soft_dependencies():
   if SERVICE_STATUS_RUNNING != WinServiceController.QueryStatus(EMBEDDED_HBASE_MASTER_SERVICE):
     err = 'ERROR: Service "{0}" was not started.'.format(EMBEDDED_HBASE_MASTER_SERVICE)
     raise FatalException(1, err)

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-metrics/ambari-metrics-timelineservice/src/main/python/main.py
----------------------------------------------------------------------
diff --git a/ambari-metrics/ambari-metrics-timelineservice/src/main/python/main.py b/ambari-metrics/ambari-metrics-timelineservice/src/main/python/main.py
index 172861e..b6b4e0b 100644
--- a/ambari-metrics/ambari-metrics-timelineservice/src/main/python/main.py
+++ b/ambari-metrics/ambari-metrics-timelineservice/src/main/python/main.py
@@ -109,8 +109,8 @@ def server_process_main(options, scmStatus=None):
 
   #Ensure the 3 Hadoop services required are started on the local machine
   if not options.no_embedded_hbase:
-    from amc_service import ensure_hdp_service_soft_dependencies
-    ensure_hdp_service_soft_dependencies()
+    from amc_service import ensure_hadoop_service_soft_dependencies
+    ensure_hadoop_service_soft_dependencies()
 
   if scmStatus is not None:
     scmStatus.reportStartPending()

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_windows.py
index 9136957..b213028 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_windows.py
@@ -35,7 +35,7 @@ falcon_home = None
 falcon_log_dir = "."
 
 if os.environ.has_key("HADOOP_HOME"):
-  hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
 
 if os.environ.has_key("FALCON_CONF_DIR"):
   falcon_conf_dir = os.environ["FALCON_CONF_DIR"]

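This stack_root derivation recurs across the Windows params scripts in this commit;
a standalone sketch of the pattern (the fallback path is hypothetical, for
illustration only):

    import os

    # Derive the stack root from HADOOP_HOME instead of assuming an "hdp" layout.
    hadoop_home = os.environ.get("HADOOP_HOME", r"C:\hadoop\current\hadoop")
    stack_root = os.path.abspath(os.path.join(hadoop_home, ".."))
    smoke_cmd = os.path.join(stack_root, "Run-SmokeTests.cmd")
    print(smoke_cmd)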
http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/service_check.py
index e633dcb..473b7c3 100644
--- a/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/service_check.py
@@ -45,7 +45,7 @@ class FalconServiceCheckWindows(FalconServiceCheck):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
     service = "FALCON"
     Execute(format("cmd /C {smoke_cmd} {service}"), user=params.falcon_user, logoutput=True, tries = 3, try_sleep = 20)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py
index ae166f8..25d9a08 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/flume_check.py
@@ -27,7 +27,7 @@ class FlumeServiceCheck(Script):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
     service = "FLUME"
     Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True, user=params.hdfs_user)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_windows.py
index 17177ec..66e9852 100644
--- a/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/FLUME/1.4.0.2.0/package/scripts/params_windows.py
@@ -26,7 +26,7 @@ config = Script.get_config()
 
 hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
 
-hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
 flume_home = os.environ['FLUME_HOME']
 flume_conf_dir = os.path.join(flume_home, 'conf')
 flume_user = hadoop_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
index c132503..7d634cb 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_windows.py
@@ -27,7 +27,7 @@ config = Script.get_config()
 hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
 hbase_bin_dir = os.path.join(os.environ["HBASE_HOME"],'bin')
 hbase_executable = os.path.join(hbase_bin_dir,"hbase.cmd")
-hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
 hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
 hbase_user = hadoop_user
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py
index 97cdd32..6ecb58e 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/service_check.py
@@ -34,7 +34,7 @@ class HbaseServiceCheckWindows(HbaseServiceCheck):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
     service = "HBASE"
     Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hbase_user, logoutput=True)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
index 5c68583..ffd0715 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/setup_ranger_hbase.py
@@ -29,10 +29,10 @@ def setup_ranger_hbase(upgrade_type=None):
     else:
       from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
     
-    hdp_version = None
+    stack_version = None
 
     if upgrade_type is not None:
-      hdp_version = params.version
+      stack_version = params.version
 
     if params.retryAble:
       Logger.info("HBase: Setup ranger: command retry enables thus retrying if ranger admin is down !")
@@ -80,6 +80,6 @@ def setup_ranger_hbase(upgrade_type=None):
                         component_list=['hbase-client', 'hbase-master', 'hbase-regionserver'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')

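The same upgrade gating recurs in the HDFS, Hive, and Knox plugin setups below; a
minimal sketch of the rule (function and argument names here are illustrative):

    def resolve_stack_version_override(upgrade_type, target_version):
        # No override outside an upgrade; during an upgrade, the target stack
        # version is passed through as stack_version_override.
        if upgrade_type is not None:
            return target_version
        return None

    print(resolve_stack_version_override(None, "2.3.0.0-1234"))       # None
    print(resolve_stack_version_override("rolling", "2.3.0.0-1234"))  # 2.3.0.0-1234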
http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
index c5ba682..92c0f70 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/upgrade.py
@@ -26,12 +26,12 @@ from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.version import compare_versions, format_stack_version
 from resource_management.libraries.functions.decorator import retry
 
-def prestart(env, hdp_component):
+def prestart(env, stack_component):
   import params
 
   if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
     conf_select.select(params.stack_name, "hbase", params.version)
-    stack_select.select(hdp_component, params.version)
+    stack_select.select(stack_component, params.version)
 
 def post_regionserver(env):
   import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
index ff93e39..209ac91 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/setup_ranger_hdfs.py
@@ -34,10 +34,10 @@ def setup_ranger_hdfs(upgrade_type=None):
     else:
       from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
 
-    hdp_version = None
+    stack_version = None
 
     if upgrade_type is not None:
-      hdp_version = params.version
+      stack_version = params.version
 
     if params.retryAble:
       Logger.info("HDFS: Setup ranger: command retry enables thus retrying if ranger admin is down !")
@@ -58,11 +58,11 @@ def setup_ranger_hdfs(upgrade_type=None):
                         component_list=['hadoop-client'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
 
-    if hdp_version and params.upgrade_direction == Direction.UPGRADE:
+    if stack_version and params.upgrade_direction == Direction.UPGRADE:
       # when upgrading to 2.3+, this env file must be removed
-      if compare_versions(hdp_version, '2.3', format=True) > 0:
+      if compare_versions(stack_version, '2.3', format=True) > 0:
         source_file = os.path.join(params.hadoop_conf_dir, 'set-hdfs-plugin-env.sh')
         target_file = source_file + ".bak"
         Execute(("mv", source_file, target_file), sudo=True, only_if=format("test -f {source_file}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
index 388fa59..cdb683b 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/status_params.py
@@ -36,12 +36,12 @@ if OSCheck.is_windows_family():
 else:
   hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
   hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-  hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-  datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-  namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-  snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-  journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-  zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+  hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+  datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+  namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+  snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+  journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+  zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
   nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
 
   # Security related/required params

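A quick standalone check of the renamed pid-path construction (the prefix and user
values are illustrative):

    hadoop_pid_dir_prefix = "/var/run/hadoop"
    hdfs_user = "hdfs"

    hadoop_pid_dir = "{0}/{1}".format(hadoop_pid_dir_prefix, hdfs_user)
    namenode_pid_file = "{0}/hadoop-{1}-namenode.pid".format(hadoop_pid_dir, hdfs_user)
    print(namenode_pid_file)  # /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid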
http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py
index 27ff29a..4153821 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_service_check.py
@@ -27,7 +27,7 @@ from ambari_commons import OSConst
 @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
 def hcat_service_check():
   import params
-  smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+  smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
   service = "HCatalog"
   Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hcat_user, logoutput=True)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
index 664cafa..c3d15e5 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_server_upgrade.py
@@ -121,7 +121,7 @@ def _get_current_hiveserver_version():
     if formatted_source_version and compare_versions(formatted_source_version, "2.2") >= 0:
       version_hive_bin = format('/usr/hdp/{source_version}/hive/bin')
     command = format('{version_hive_bin}/hive --version')
-    return_code, hdp_output = shell.call(command, user=params.hive_user, path=hive_execute_path)
+    return_code, output = shell.call(command, user=params.hive_user, path=hive_execute_path)
   except Exception, e:
     Logger.error(str(e))
     raise Fail('Unable to execute hive --version command to retrieve the hiveserver2 version.')
@@ -129,12 +129,12 @@ def _get_current_hiveserver_version():
   if return_code != 0:
     raise Fail('Unable to determine the current HiveServer2 version because of a non-zero return code of {0}'.format(str(return_code)))
 
-  match = re.search('^(Hive) ([0-9]+.[0-9]+.\S+)', hdp_output, re.MULTILINE)
+  match = re.search('^(Hive) ([0-9]+.[0-9]+.\S+)', output, re.MULTILINE)
 
   if match:
     current_hive_server_version = match.group(2)
     return current_hive_server_version
   else:
-    raise Fail('The extracted hiveserver2 version "{0}" does not matching any known pattern'.format(hdp_output))
+    raise Fail('The extracted hiveserver2 version "{0}" does not match any known pattern'.format(output))
 
 

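A self-contained check of the version-parsing regex above (a raw string is used here;
the sample output line is hypothetical, not captured from a real cluster):

    import re

    output = "Hive 1.2.1.2.3.0.0-2557"  # hypothetical first line of `hive --version`
    match = re.search(r'^(Hive) ([0-9]+.[0-9]+.\S+)', output, re.MULTILINE)
    if match:
        print(match.group(2))  # -> 1.2.1.2.3.0.0-2557
    else:
        raise RuntimeError('version output did not match the expected pattern')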
http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
index 7c21b5f..5610019 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_windows.py
@@ -28,7 +28,7 @@ config = Script.get_config()
 stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 stack_version_formatted = format_stack_version(stack_version_unformatted)
 
-hdp_root = None
+stack_root = None
 hive_conf_dir = None
 hive_home = None
 hive_lib_dir = None
@@ -39,7 +39,7 @@ hcat_config_dir = None
 hive_bin = None
 
 try:
-  hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
   hive_conf_dir = os.environ["HIVE_CONF_DIR"]
   hive_home = os.environ["HIVE_HOME"]
   hive_lib_dir = os.environ["HIVE_LIB_DIR"]

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
index 0c254be..251e71f 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
@@ -37,7 +37,7 @@ class HiveServiceCheckWindows(HiveServiceCheck):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
     service = "HIVE"
     Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hive_user, logoutput=True)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
index 8b2e4e4..f51dbab 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/setup_ranger_hive.py
@@ -29,10 +29,10 @@ def setup_ranger_hive(upgrade_type = None):
     else:
       from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
     
-    hdp_version = None
+    stack_version = None
 
     if upgrade_type is not None:
-      hdp_version = params.version
+      stack_version = params.version
 
     if params.retryAble:
       Logger.info("Hive: Setup ranger: command retry enables thus retrying if ranger admin is down !")
@@ -72,6 +72,6 @@ def setup_ranger_hive(upgrade_type = None):
                         component_list=['hive-client', 'hive-metastore', 'hive-server2'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py
index 99f52a5..9965bfa9 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/webhcat_service_check.py
@@ -32,7 +32,7 @@ def webhcat_service_check():
   # AMBARI-11633 [WinTP2] Webhcat service check fails
   # Hive doesn't pass the environment variables correctly to child processes, which fails the smoke test.
   # Reducing the amount of URLs checked to the minimum required.
-  #smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+  #smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
   #service = "WEBHCAT"
   #Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hcat_user, logoutput=True)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
index ba6857e..4e73730 100644
--- a/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
+++ b/ambari-server/src/main/resources/common-services/KAFKA/0.8.1.2.2/package/scripts/params.py
@@ -248,8 +248,8 @@ if has_ranger_admin and is_supported_kafka_ranger:
   ssl_truststore_password = unicode(config['configurations']['ranger-kafka-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
   credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
 
-  hdp_version = get_stack_version('kafka-broker')
-  setup_ranger_env_sh_source = format('/usr/hdp/{hdp_version}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
+  stack_version = get_stack_version('kafka-broker')
+  setup_ranger_env_sh_source = format('/usr/hdp/{stack_version}/ranger-kafka-plugin/install/conf.templates/enable/kafka-ranger-env.sh')
   setup_ranger_env_sh_target = format("{conf_dir}/kafka-ranger-env.sh")
 
   #For SQLA explicitly disable audit to DB for Ranger

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_windows.py
index e044d9a..e7a3747 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_windows.py
@@ -25,7 +25,7 @@ from status_params import *
 # server configurations
 config = Script.get_config()
 
-hdp_root = None
+stack_root = None
 knox_home = None
 knox_conf_dir = None
 knox_logs_dir = None
@@ -38,7 +38,7 @@ knox_master_secret_path = None
 knox_cert_store_path = None
 
 try:
-  hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
   knox_home = os.environ['KNOX_HOME']
   knox_conf_dir = os.environ['KNOX_CONF_DIR']
   knox_logs_dir = os.environ['KNOX_LOG_DIR']

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
index 1a08d54..13987c8 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/setup_ranger_knox.py
@@ -29,9 +29,9 @@ def setup_ranger_knox(upgrade_type=None):
     else:
       from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
     
-    hdp_version = None
+    stack_version = None
     if upgrade_type is not None:
-      hdp_version = params.version
+      stack_version = params.version
 
     if params.retryAble:
       Logger.info("Knox: Setup ranger: command retry enables thus retrying if ranger admin is down !")
@@ -72,6 +72,6 @@ def setup_ranger_knox(upgrade_type=None):
                         component_list=['knox-server'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_windows.py
index ec404c4..1f939d4 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_windows.py
@@ -24,7 +24,7 @@ from status_params import *
 config = Script.get_config()
 
 hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
-hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
 oozie_root = os.environ['OOZIE_ROOT']
 oozie_home = os.environ['OOZIE_HOME']
 oozie_conf_dir = os.path.join(oozie_home,'conf')

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
index f0cdb2c..8d14836 100644
--- a/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/service_check.py
@@ -130,7 +130,7 @@ class OozieServiceCheckWindows(OozieServiceCheck):
     import params
 
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
     service = "OOZIE"
     Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_windows.py
index 8a0a519..9cb0ce7 100644
--- a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_windows.py
@@ -23,11 +23,11 @@ from resource_management import *
 # server configurations
 config = Script.get_config()
 
-hdp_root = None
+stack_root = None
 pig_home = None
 pig_conf_dir = None
 try:
-  hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
   pig_home = os.environ['PIG_HOME']
   pig_conf_dir = os.path.join(pig_home,'conf')
 except:

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
index 155e63c..ec2946e 100644
--- a/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/service_check.py
@@ -125,7 +125,7 @@ class PigServiceCheckWindows(PigServiceCheck):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
     service = "PIG"
     Execute(format("cmd /C {smoke_cmd} {service}", smoke_cmd=smoke_cmd, service=service), logoutput=True, user=params.pig_user, timeout=300)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
index ed8b690..aa75949 100644
--- a/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/RANGER/0.4.0/package/scripts/upgrade.py
@@ -23,9 +23,9 @@ from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.format import format
 
-def prestart(env, hdp_component):
+def prestart(env, stack_component):
   import params
 
   if params.version and params.stack_is_hdp22_or_further:
-    conf_select.select(params.stack_name, hdp_component, params.version)
-    stack_select.select(hdp_component, params.version)
+    conf_select.select(params.stack_name, stack_component, params.version)
+    stack_select.select(stack_component, params.version)

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py
index 9450184..42f1cb9 100755
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/kms_server.py
@@ -17,7 +17,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
-from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import stack_select
 from resource_management.libraries.script import Script
 from resource_management.core.resources.system import Execute
 from resource_management.core.exceptions import ComponentIsNotRunning
@@ -82,7 +82,7 @@ class KmsServer(Script):
     import params
     env.set_params(params)
 
-    upgrade_stack = hdp_select._get_upgrade_stack()
+    upgrade_stack = stack_select._get_upgrade_stack()
     if upgrade_stack is None:
       raise Fail('Unable to determine the stack and stack version')
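
For context, a hedged sketch of how this call is consumed; the assumption (not shown in this hunk) is that _get_upgrade_stack() returns a (stack_name, stack_version) pair while an upgrade is in flight and None otherwise:

from resource_management.core.exceptions import Fail
from resource_management.libraries.functions import stack_select

def current_upgrade_stack():
    upgrade_stack = stack_select._get_upgrade_stack()
    if upgrade_stack is None:
        raise Fail('Unable to determine the stack and stack version')
    # Assumed shape: ("HDP", "2.3.0.0-2557") or similar.
    stack_name, stack_version = upgrade_stack
    return stack_name, stack_version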
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py
index 798e8f7..315a417 100644
--- a/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/RANGER_KMS/0.5.0.2.3/package/scripts/upgrade.py
@@ -22,9 +22,9 @@ from resource_management.libraries.functions import conf_select
 from resource_management.libraries.functions import stack_select
 from resource_management.libraries.functions.format import format
 
-def prestart(env, hdp_component):
+def prestart(env, stack_component):
   import params
 
   if params.version and params.stack_is_hdp23_or_further:
-    conf_select.select(params.stack_name, hdp_component, params.version)
-    stack_select.select(hdp_component, params.version)
+    conf_select.select(params.stack_name, stack_component, params.version)
+    stack_select.select(stack_component, params.version)
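
To show how the renamed parameter flows through (here and in the Ranger diff above), a hypothetical caller: a component script's pre-upgrade hook passing its stack-select component name. The hook name and the component name "ranger-kms" are assumptions, not taken from this diff; the sketch also assumes the script sits in the same package as upgrade.py:

import upgrade

def pre_upgrade_restart(env, upgrade_type=None):
    import params
    env.set_params(params)
    # "ranger-kms" stands in for whatever component name the real script uses.
    upgrade.prestart(env, "ranger-kms")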

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_windows.py
index 366a1c9..fdb8160 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_windows.py
@@ -24,13 +24,13 @@ import os
 # server configurations
 config = Script.get_config()
 
-hdp_root = None
+stack_root = None
 slider_home = None
 slider_bin_dir = None
 slider_conf_dir = None
 storm_slider_conf_dir = None
 try:
-  hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
   slider_home = os.environ['SLIDER_HOME']
   slider_bin_dir = os.path.join(slider_home, 'bin')
   slider_conf_dir = os.path.join(slider_home, 'conf')

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
index b93b0eb..a16d0e7 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
@@ -29,7 +29,7 @@ class SliderServiceCheck(Script):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root, "Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
     service = "SLIDER"
     Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True, user=params.hdfs_user)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_windows.py
index e4b71f1..f930765 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params_windows.py
@@ -24,7 +24,7 @@ config = Script.get_config()
 
 sqoop_user = "sqoop"
 
-hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
 sqoop_env_cmd_template = config['configurations']['sqoop-env']['content']
 sqoop_home_dir = os.environ["SQOOP_HOME"]
 sqoop_conf_dir = os.path.join(sqoop_home_dir, "conf")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py
index 426315f..c0d0e8c 100644
--- a/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/service_check.py
@@ -53,7 +53,7 @@ class SqoopServiceCheckWindows(SqoopServiceCheck):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
     service = "SQOOP"
     Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py
index 90bc76d..88e6246 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/params_windows.py
@@ -27,7 +27,7 @@ config = Script.get_config()
 
 stack_is_hdp23_or_further = Script.is_stack_greater_or_equal("2.3")
 
-hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
+stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"],".."))
 conf_dir = os.environ["STORM_CONF_DIR"]
 hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
 storm_user = hadoop_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/service_check.py
index 99a6439..80ea0f5 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/service_check.py
@@ -38,7 +38,7 @@ class ServiceCheckWindows(ServiceCheck):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
     service = "STORM"
     Execute(format("cmd /C {smoke_cmd} {service}", smoke_cmd=smoke_cmd, service=service), user=params.storm_user, logoutput=True)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py
index a76457f..bef1f02 100644
--- a/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py
+++ b/ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/setup_ranger_storm.py
@@ -32,9 +32,9 @@ def setup_ranger_storm(upgrade_type=None):
     else:
       from resource_management.libraries.functions.setup_ranger_plugin import setup_ranger_plugin
     
-    hdp_version = None
+    stack_version = None
     if upgrade_type is not None:
-      hdp_version = params.version
+      stack_version = params.version
 
     if params.retryAble:
       Logger.info("Storm: Setup ranger: command retry enables thus retrying if ranger admin is down !")
@@ -75,6 +75,6 @@ def setup_ranger_storm(upgrade_type=None):
                         component_list=['storm-client', 'storm-nimbus'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
                         credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password, 
                         ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
-                        hdp_version_override = hdp_version, skip_if_rangeradmin_down= not params.retryAble)
+                        stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
   else:
     Logger.info('Ranger admin not installed')

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py
index ad80830..dd732f5 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_windows.py
@@ -36,9 +36,9 @@ except KeyError:
 
 stack_version_formatted = ""
 
-hdp_root = None
+stack_root = None
 try:
-  hdp_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
+  stack_root = os.path.abspath(os.path.join(os.environ["HADOOP_HOME"], ".."))
 except:
   pass
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
index b70256c..c0c66af 100644
--- a/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/service_check.py
@@ -100,7 +100,7 @@ class TezServiceCheckWindows(TezServiceCheck):
   def service_check(self, env):
     import params
     env.set_params(params)
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
     service = "TEZ"
     Execute(format("cmd /C {smoke_cmd} {service}"), logoutput=True, user=params.tez_user)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_windows.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_windows.py
index 480fc8b..c36e152 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_windows.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/params_windows.py
@@ -26,11 +26,11 @@ import status_params
 config = Script.get_config()
 
 config_dir = None
-hdp_root = None
+stack_root = None
 try:
   # not used zookeeper_home_dir = os.environ["ZOOKEEPER_HOME"]
   config_dir = os.environ["ZOOKEEPER_CONF_DIR"]
-  hdp_root = os.environ["HADOOP_NODE_INSTALL_ROOT"]
+  stack_root = os.environ["HADOOP_NODE_INSTALL_ROOT"]
 except:
   pass
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py
index b53e5f5..622a5eb 100644
--- a/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/ZOOKEEPER/3.4.5.2.0/package/scripts/service_check.py
@@ -65,7 +65,7 @@ class ZookeeperServiceCheckWindows(ZookeeperServiceCheck):
     import params
     env.set_params(params)
 
-    smoke_cmd = os.path.join(params.hdp_root,"Run-SmokeTests.cmd")
+    smoke_cmd = os.path.join(params.stack_root,"Run-SmokeTests.cmd")
     service = "Zookeeper"
     Execute(format("cmd /C {smoke_cmd} {service}"), user=params.zk_user, logoutput=True, tries=3, try_sleep=20)
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
index 94f19d6..b7a7038 100644
--- a/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
+++ b/ambari-server/src/main/resources/host_scripts/alert_disk_space.py
@@ -37,10 +37,10 @@ PERCENT_USED_WARNING_DEFAULT = 50
 PERCENT_USED_CRITICAL_DEFAULT = 80
 
 # the location where HDP installs components when using HDP 2.2+
-HDP_HOME_DIR = "/usr/hdp"
+STACK_HOME_DIR = "/usr/hdp"
 
 # the location where HDP installs components when using HDP 2.0 to 2.1
-HDP_HOME_LEGACY_DIR = "/usr/lib"
+STACK_HOME_LEGACY_DIR = "/usr/lib"
 
 def get_tokens():
   """
@@ -66,14 +66,14 @@ def execute(configurations={}, parameters={}, host_name=None):
   """
 
   # determine the location of HDP home
-  hdp_home = None
-  if os.path.isdir(HDP_HOME_DIR):
-    hdp_home = HDP_HOME_DIR
-  elif os.path.isdir(HDP_HOME_LEGACY_DIR):
-    hdp_home = HDP_HOME_LEGACY_DIR
-
-  # if hdp home was found, use it; otherwise default to None
-  path = hdp_home if hdp_home is not None else None
+  stack_home = None
+  if os.path.isdir(STACK_HOME_DIR):
+    stack_home = STACK_HOME_DIR
+  elif os.path.isdir(STACK_HOME_LEGACY_DIR):
+    stack_home = STACK_HOME_LEGACY_DIR
+
+  # if stack home was found, use it; otherwise default to None
+  path = stack_home if stack_home is not None else None
 
   try:
     disk_usage = _get_disk_usage(path)
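
A rough sketch of what _get_disk_usage could do with the resolved path (the real helper is outside this hunk; os.statvfs is Unix-only, which matches the /usr/hdp and /usr/lib checks above):

import collections
import os

DiskInfo = collections.namedtuple('DiskInfo', ['total', 'used', 'free', 'path'])

def sketch_get_disk_usage(path):
    # Fall back to the root filesystem when no stack home directory was found.
    st = os.statvfs(path if path is not None else '/')
    total = st.f_blocks * st.f_frsize
    free = st.f_bavail * st.f_frsize
    return DiskInfo(total=total, used=total - free, free=free, path=path)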

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
index 71ac3df..6c2806d 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py
@@ -28,7 +28,7 @@ class AfterInstallHook(Hook):
     import params
 
     env.set_params(params)
-    setup_hdp_install_directory()
+    setup_install_directory()
     setup_config()
 
 if __name__ == "__main__":

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
index f5851aa..dd66d58 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
@@ -35,7 +35,7 @@ else:
 
 hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-versioned_hdp_root = '/usr/bigtop/current'
+versioned_stack_root = '/usr/bigtop/current'
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 #java params

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
index a930f54..8094129 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -19,7 +19,7 @@ limitations under the License.
 import os
 from resource_management import *
 
-def setup_hdp_install_directory():
+def setup_install_directory():
   import params
   if params.rpm_version:
     Execute(format('ambari-python-wrap /usr/bin/bigtop-select set all `ambari-python-wrap /usr/bin/bigtop-select versions | grep ^{rpm_version}- | tail -1`'),
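
The backtick pipeline selects the newest fully-qualified build matching the configured rpm_version. A rough Python equivalent, assuming bigtop-select versions prints one version string per line in ascending order:

import subprocess

def latest_matching_version(rpm_version):
    # Mirrors: bigtop-select versions | grep ^<rpm_version>- | tail -1
    out = subprocess.check_output(["/usr/bin/bigtop-select", "versions"])
    matches = [line for line in out.decode().splitlines()
               if line.startswith(rpm_version + "-")]
    return matches[-1] if matches else None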

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
index 7a93d41..97e560a 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-ANY/scripts/params.py
@@ -47,7 +47,7 @@ else:
 
 hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-versioned_hdp_root = '/usr/bigtop/current'
+versioned_stack_root = '/usr/bigtop/current'
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/status_params.py
index 0027a4c..c2adadb 100644
--- a/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/status_params.py
+++ b/ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/status_params.py
@@ -23,9 +23,9 @@ config = Script.get_config()
 
 hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
index 73412e0..8a583b3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py
@@ -20,7 +20,7 @@ limitations under the License.
 from resource_management.libraries.script.hook import Hook
 from shared_initialization import link_configs
 from shared_initialization import setup_config
-from shared_initialization import setup_hdp_symlinks
+from shared_initialization import setup_stack_symlinks
 
 class AfterInstallHook(Hook):
 
@@ -28,7 +28,7 @@ class AfterInstallHook(Hook):
     import params
 
     env.set_params(params)
-    setup_hdp_symlinks()
+    setup_stack_symlinks()
     setup_config()
 
     link_configs(self.stroutfile)

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index ab7dea5..cd04492 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@ -49,7 +49,7 @@ if Script.is_stack_greater_or_equal("2.2"):
   # not supported in HDP 2.2+
   hadoop_conf_empty_dir = None
 
-versioned_hdp_root = '/usr/hdp/current'
+versioned_stack_root = '/usr/hdp/current'
 
 #security params
 security_enabled = config['configurations']['cluster-env']['security_enabled']

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
index 8cf75cb..96dc104 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
@@ -28,7 +28,7 @@ from resource_management.libraries.resources.xml_config import XmlConfig
 from resource_management.libraries.script import Script
 
 
-def setup_hdp_symlinks():
+def setup_stack_symlinks():
   """
   Invokes hdp-select set all against a calculated fully-qualified, "normalized" version based on a
   stack version, such as "2.3". This should always be called after a component has been
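
The body of the hook falls outside this hunk; a hedged sketch of the call the docstring describes, with "2.3.4.0-3485" standing in for the calculated normalized version and sudo handling omitted:

from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.format import format

def setup_stack_symlinks_sketch(normalized_version="2.3.4.0-3485"):
    # hdp-select set all repoints the /usr/hdp/current symlinks at the
    # fully-qualified build resolved from the bare stack version.
    Execute(format("ambari-python-wrap /usr/bin/hdp-select set all {normalized_version}"))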

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
index 7e37873..e4cad39 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
@@ -103,7 +103,7 @@ hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgr
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 hadoop_secure_dn_user = hdfs_user
 hadoop_dir = "/etc/hadoop"
-versioned_hdp_root = '/usr/hdp/current'
+versioned_stack_root = '/usr/hdp/current'
 hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
 datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
 is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py
index bfc605c..d541a15 100644
--- a/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDPWIN/2.1/hooks/after-INSTALL/scripts/params.py
@@ -39,8 +39,8 @@ if os.environ.has_key("HADOOP_NODE_INSTALL_ROOT"):
   hadoop_install_root = os.environ["HADOOP_NODE_INSTALL_ROOT"]
 
 
-hdp_log_dir = "c:\\hadoop\\logs"
-hdp_data_dir = "c:\\hadoop"
+stack_log_dir = "c:\\hadoop\\logs"
+stack_data_dir = "c:\\hadoop"
 db_flavor = "MSSQL"
 db_hostname = "localhost"
 db_port = "1433"

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/test/python/TestVersionSelectUtil.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/TestVersionSelectUtil.py b/ambari-server/src/test/python/TestVersionSelectUtil.py
index f753023..9fa24cc 100644
--- a/ambari-server/src/test/python/TestVersionSelectUtil.py
+++ b/ambari-server/src/test/python/TestVersionSelectUtil.py
@@ -39,7 +39,7 @@ class TestVersionSelectUtil(TestCase):
   @patch('__builtin__.open')
   @patch("resource_management.core.shell.call")
   def test_get_component_version(self, call_mock, open_mock):
-    hdp_expected_version = "2.2.1.0-2175"
+    stack_expected_version = "2.2.1.0-2175"
 
     # Mock classes for reading from a file
     class MagicFile(object):
@@ -49,7 +49,7 @@ class TestVersionSelectUtil(TestCase):
                            "zookeeper-client"
                            ])
       def read(self, value):
-        return (value + " - " + hdp_expected_version) if value in self.allowed_names else ("ERROR: Invalid package - " + value)
+        return (value + " - " + stack_expected_version) if value in self.allowed_names else ("ERROR: Invalid package - " + value)
 
       def __exit__(self, exc_type, exc_val, exc_tb):
         pass
@@ -88,6 +88,6 @@ class TestVersionSelectUtil(TestCase):
 
     # Pass
     version = self.module.get_component_version("HDP", "hadoop-hdfs-namenode")
-    self.assertEquals(version, hdp_expected_version)
+    self.assertEquals(version, stack_expected_version)
     version = self.module.get_component_version("HDP", "hadoop-hdfs-datanode")
-    self.assertEquals(version, hdp_expected_version)
+    self.assertEquals(version, stack_expected_version)

http://git-wip-us.apache.org/repos/asf/ambari/blob/456b4511/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
index 80ba480..f022c80 100644
--- a/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
+++ b/ambari-server/src/test/python/custom_actions/TestInstallPackages.py
@@ -70,9 +70,9 @@ class TestInstallPackages(RMFTestCase):
   def test_normal_flow_rhel(self,
                             write_actual_version_to_history_file_mock,
                             read_actual_version_from_history_file_mock,
-                            hdp_versions_mock,
+                            stack_versions_mock,
                             put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock):
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       [VERSION_STUB]
     ]
@@ -129,10 +129,10 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_normal_flow_sles(self, write_actual_version_to_history_file_mock,
                             read_actual_version_from_history_file_mock,
-                            hdp_versions_mock, put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock, is_suse_family_mock):
+                            stack_versions_mock, put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock, is_suse_family_mock):
     is_suse_family_mock = True
     Script.stack_version_from_distro_select = VERSION_STUB
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       [VERSION_STUB]
     ]
@@ -190,10 +190,10 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_exclude_existing_repo(self,  write_actual_version_to_history_file_mock,
                                  read_actual_version_from_history_file_mock,
-                                 hdp_versions_mock,
+                                 stack_versions_mock,
                                  allInstalledPackages_mock, put_structured_out_mock,
                                  is_redhat_family_mock, list_ambari_managed_repos_mock):
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       [VERSION_STUB]
     ]
@@ -324,11 +324,11 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   def test_format_package_name(self,                                                                                    write_actual_version_to_history_file_mock,
                                read_actual_version_from_history_file_mock,
-                               hdp_versions_mock,
+                               stack_versions_mock,
                                allInstalledPackages_mock, put_structured_out_mock,
                                package_mock, is_suse_family_mock):
     Script.stack_version_from_distro_select = VERSION_STUB
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       [VERSION_STUB]
     ]
@@ -387,9 +387,9 @@ class TestInstallPackages(RMFTestCase):
   def test_version_reporting__build_number_defined(self,
                                                                                    write_actual_version_to_history_file_mock,
                                                                                    read_actual_version_from_history_file_mock,
-                                                                                   hdp_versions_mock,
+                                                                                   stack_versions_mock,
                                                                                    put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock):
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [OLD_VERSION_STUB],  # before installation attempt
       [OLD_VERSION_STUB, VERSION_STUB]
     ]
@@ -419,12 +419,12 @@ class TestInstallPackages(RMFTestCase):
     self.assertTrue(write_actual_version_to_history_file_mock.called)
     self.assertEquals(write_actual_version_to_history_file_mock.call_args[0], (VERSION_STUB_WITHOUT_BUILD_NUMBER, VERSION_STUB))
 
-    hdp_versions_mock.reset_mock()
+    stack_versions_mock.reset_mock()
     write_actual_version_to_history_file_mock.reset_mock()
     put_structured_out_mock.reset_mock()
 
     # Test retrying install again
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [OLD_VERSION_STUB, VERSION_STUB],
       [OLD_VERSION_STUB, VERSION_STUB]
     ]
@@ -464,14 +464,14 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   @patch("os.path.exists")
-  def test_version_reporting__build_number_not_defined__usr_hdp_present__no_components_installed(self,
+  def test_version_reporting__build_number_not_defined_stack_root_present__no_components_installed(self,
                                                                             exists_mock,
                                                                             write_actual_version_to_history_file_mock,
                                                                             read_actual_version_from_history_file_mock,
-                                                                            hdp_versions_mock,
+                                                                            stack_versions_mock,
                                                                             put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock):
     exists_mock.return_value = True
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       []
     ]
@@ -508,7 +508,7 @@ class TestInstallPackages(RMFTestCase):
 
     self.assertFalse(write_actual_version_to_history_file_mock.called)
 
-    hdp_versions_mock.reset_mock()
+    stack_versions_mock.reset_mock()
     write_actual_version_to_history_file_mock.reset_mock()
     put_structured_out_mock.reset_mock()
 
@@ -520,14 +520,14 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   @patch("os.path.exists")
-  def test_version_reporting__build_number_not_defined__usr_hdp_absent(self,
+  def test_version_reporting__build_number_not_defined_stack_root_absent(self,
                                                                         exists_mock,
                                                                         write_actual_version_to_history_file_mock,
                                                                         read_actual_version_from_history_file_mock,
-                                                                        hdp_versions_mock,
+                                                                        stack_versions_mock,
                                                                         put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock):
     exists_mock.return_value = False
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       []
     ]
@@ -562,13 +562,13 @@ class TestInstallPackages(RMFTestCase):
 
     self.assertFalse(write_actual_version_to_history_file_mock.called)
 
-    hdp_versions_mock.reset_mock()
+    stack_versions_mock.reset_mock()
     write_actual_version_to_history_file_mock.reset_mock()
     put_structured_out_mock.reset_mock()
 
     # Test retrying install again  (correct build number, provided by other nodes, is now received from server)
 
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       []
     ]
@@ -611,12 +611,12 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
-  def test_version_reporting__build_number_not_defined__usr_hdp_present(self,
+  def test_version_reporting__build_number_not_defined_stack_root_present(self,
                                                                     write_actual_version_to_history_file_mock,
                                                                     read_actual_version_from_history_file_mock,
-                                                                    hdp_versions_mock,
+                                                                    stack_versions_mock,
                                                                     put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock):
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [OLD_VERSION_STUB],  # before installation attempt
       [OLD_VERSION_STUB, VERSION_STUB]
     ]
@@ -646,12 +646,12 @@ class TestInstallPackages(RMFTestCase):
     self.assertTrue(write_actual_version_to_history_file_mock.called)
     self.assertEquals(write_actual_version_to_history_file_mock.call_args[0], (VERSION_STUB_WITHOUT_BUILD_NUMBER, VERSION_STUB))
 
-    hdp_versions_mock.reset_mock()
+    stack_versions_mock.reset_mock()
     write_actual_version_to_history_file_mock.reset_mock()
     put_structured_out_mock.reset_mock()
 
     # Test retrying install again
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [OLD_VERSION_STUB, VERSION_STUB],
       [OLD_VERSION_STUB, VERSION_STUB]
     ]
@@ -689,12 +689,12 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.stack_select.get_stack_versions")
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
-  def test_version_reporting__wrong_build_number_specified__usr_hdp_present(self,
+  def test_version_reporting__wrong_build_number_specified_stack_root_present(self,
                                                                         write_actual_version_to_history_file_mock,
                                                                         read_actual_version_from_history_file_mock,
-                                                                        hdp_versions_mock,
+                                                                        stack_versions_mock,
                                                                         put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock):
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [OLD_VERSION_STUB],  # before installation attempt
       [OLD_VERSION_STUB, VERSION_STUB]
     ]
@@ -724,12 +724,12 @@ class TestInstallPackages(RMFTestCase):
     self.assertTrue(write_actual_version_to_history_file_mock.called)
     self.assertEquals(write_actual_version_to_history_file_mock.call_args[0], ('2.2.0.1', VERSION_STUB))
 
-    hdp_versions_mock.reset_mock()
+    stack_versions_mock.reset_mock()
     write_actual_version_to_history_file_mock.reset_mock()
     put_structured_out_mock.reset_mock()
 
     # Test retrying install again
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [OLD_VERSION_STUB, VERSION_STUB],
       [OLD_VERSION_STUB, VERSION_STUB]
     ]
@@ -768,14 +768,14 @@ class TestInstallPackages(RMFTestCase):
   @patch("resource_management.libraries.functions.repo_version_history.read_actual_version_from_history_file")
   @patch("resource_management.libraries.functions.repo_version_history.write_actual_version_to_history_file")
   @patch("os.path.exists")
-  def test_version_reporting__wrong_build_number_specified__usr_hdp_absent(self,
+  def test_version_reporting__wrong_build_number_specified_stack_root_absent(self,
                                                                             exists_mock,
                                                                             write_actual_version_to_history_file_mock,
                                                                             read_actual_version_from_history_file_mock,
-                                                                            hdp_versions_mock,
+                                                                            stack_versions_mock,
                                                                             put_structured_out_mock, allInstalledPackages_mock, list_ambari_managed_repos_mock):
     exists_mock.return_value = False
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       []
     ]
@@ -810,13 +810,13 @@ class TestInstallPackages(RMFTestCase):
 
     self.assertFalse(write_actual_version_to_history_file_mock.called)
 
-    hdp_versions_mock.reset_mock()
+    stack_versions_mock.reset_mock()
     write_actual_version_to_history_file_mock.reset_mock()
     put_structured_out_mock.reset_mock()
 
     # Test retrying install again (correct build number, provided by other nodes, is now received from server)
 
-    hdp_versions_mock.side_effect = [
+    stack_versions_mock.side_effect = [
       [],  # before installation attempt
       []
     ]
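
Throughout these tests the renamed stack_versions_mock uses side_effect as a call-by-call script: each element is returned by one successive call. A standalone illustration with unittest.mock ("2.2.0.1-885" is an arbitrary stand-in for VERSION_STUB):

from unittest.mock import MagicMock

get_stack_versions = MagicMock()
get_stack_versions.side_effect = [
    [],               # first call: before the installation attempt
    ["2.2.0.1-885"],  # second call: after installation
]

assert get_stack_versions() == []
assert get_stack_versions() == ["2.2.0.1-885"]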


[12/51] [abbrv] ambari git commit: AMBARI-15316. Kerberos: Provide SHA256 or SHA512 options for template principal digest (rlevas)

Posted by jl...@apache.org.
AMBARI-15316. Kerberos: Provide SHA256 or SHA512 options for template principal digest (rlevas)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1986078f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1986078f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1986078f

Branch: refs/heads/AMBARI-13364
Commit: 1986078f78e545eefc64f5315304ccebbb6dbae8
Parents: be7bafe
Author: Robert Levas <rl...@hortonworks.com>
Authored: Tue Mar 8 12:44:24 2016 -0500
Committer: Robert Levas <rl...@hortonworks.com>
Committed: Tue Mar 8 12:44:24 2016 -0500

----------------------------------------------------------------------
 .../kerberos/ADKerberosOperationHandler.java    |  2 +
 .../ADKerberosOperationHandlerTest.java         | 72 ++++++++++++++++++--
 2 files changed, 70 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1986078f/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandler.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandler.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandler.java
index a1e1544..deea158 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandler.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandler.java
@@ -269,6 +269,8 @@ public class ADKerberosOperationHandler extends KerberosOperationHandler {
     context.put("is_service", service);
     context.put("container_dn", this.principalContainerDn);
     context.put("principal_digest", DigestUtils.sha1Hex(deconstructedPrincipal.getNormalizedPrincipal()));
+    context.put("principal_digest_256", DigestUtils.sha256Hex(deconstructedPrincipal.getNormalizedPrincipal()));
+    context.put("principal_digest_512", DigestUtils.sha512Hex(deconstructedPrincipal.getNormalizedPrincipal()));
 
     Map<String, Object> data = processCreateTemplate(context);
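
A Python rendering of the three digest values for comparison (the Java code uses Commons Codec's DigestUtils; hashlib is the stand-in here, and the input is whatever normalized principal string the handler computes):

import hashlib

def principal_digests(normalized_principal):
    data = normalized_principal.encode("utf-8")
    return {
        "principal_digest": hashlib.sha1(data).hexdigest(),
        "principal_digest_256": hashlib.sha256(data).hexdigest(),
        "principal_digest_512": hashlib.sha512(data).hexdigest(),
    }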
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/1986078f/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandlerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandlerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandlerTest.java
index 0622807..52cd372 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandlerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/kerberos/ADKerberosOperationHandlerTest.java
@@ -200,6 +200,7 @@ public class ADKerberosOperationHandlerTest extends KerberosOperationHandlerTest
                 .andAnswer(new IAnswer<NamingEnumeration<SearchResult>>() {
                   @Override
                   public NamingEnumeration<SearchResult> answer() throws Throwable {
+                    @SuppressWarnings("unchecked")
                     NamingEnumeration<SearchResult> result = createNiceMock(NamingEnumeration.class);
                     expect(result.hasMore()).andReturn(false).once();
                     replay(result);
@@ -238,14 +239,15 @@ public class ADKerberosOperationHandlerTest extends KerberosOperationHandlerTest
       }
     };
 
-    Capture<Name> capturedName = new Capture<Name>(CaptureType.ALL);
-    Capture<Attributes> capturedAttributes = new Capture<Attributes>(CaptureType.ALL);
+    Capture<Name> capturedName = newCapture(CaptureType.ALL);
+    Capture<Attributes> capturedAttributes = newCapture(CaptureType.ALL);
 
     ADKerberosOperationHandler handler = createMockBuilder(ADKerberosOperationHandler.class)
         .addMockedMethod(ADKerberosOperationHandler.class.getDeclaredMethod("createInitialLdapContext", Properties.class, Control[].class))
         .addMockedMethod(ADKerberosOperationHandler.class.getDeclaredMethod("createSearchControls"))
         .createNiceMock();
 
+    @SuppressWarnings("unchecked")
     NamingEnumeration<SearchResult> searchResult = createNiceMock(NamingEnumeration.class);
     expect(searchResult.hasMore()).andReturn(false).once();
 
@@ -366,14 +368,15 @@ public class ADKerberosOperationHandlerTest extends KerberosOperationHandlerTest
       }
     };
 
-    Capture<Name> capturedName = new Capture<Name>();
-    Capture<Attributes> capturedAttributes = new Capture<Attributes>();
+    Capture<Name> capturedName = newCapture();
+    Capture<Attributes> capturedAttributes = newCapture();
 
     ADKerberosOperationHandler handler = createMockBuilder(ADKerberosOperationHandler.class)
         .addMockedMethod(ADKerberosOperationHandler.class.getDeclaredMethod("createInitialLdapContext", Properties.class, Control[].class))
         .addMockedMethod(ADKerberosOperationHandler.class.getDeclaredMethod("createSearchControls"))
         .createNiceMock();
 
+    @SuppressWarnings("unchecked")
     NamingEnumeration<SearchResult> searchResult = createNiceMock(NamingEnumeration.class);
     expect(searchResult.hasMore()).andReturn(false).once();
 
@@ -438,7 +441,68 @@ public class ADKerberosOperationHandlerTest extends KerberosOperationHandlerTest
 
     Assert.assertNotNull(attributes.get("userAccountControl"));
     Assert.assertEquals("66048", attributes.get("userAccountControl").get());
+  }
+
+  @Test
+  public void testDigests() throws Exception {
+    PrincipalKeyCredential kc = new PrincipalKeyCredential(DEFAULT_ADMIN_PRINCIPAL, DEFAULT_ADMIN_PASSWORD);
+    Map<String, String> kerberosEnvMap = new HashMap<String, String>();
+    kerberosEnvMap.put(ADKerberosOperationHandler.KERBEROS_ENV_LDAP_URL, DEFAULT_LDAP_URL);
+    kerberosEnvMap.put(ADKerberosOperationHandler.KERBEROS_ENV_PRINCIPAL_CONTAINER_DN, DEFAULT_PRINCIPAL_CONTAINER_DN);
+    kerberosEnvMap.put(ADKerberosOperationHandler.KERBEROS_ENV_AD_CREATE_ATTRIBUTES_TEMPLATE, "" +
+            "{" +
+            "\"principal_digest\": \"$principal_digest\"," +
+            "\"principal_digest_256\": \"$principal_digest_256\"," +
+            "\"principal_digest_512\": \"$principal_digest_512\"" +
+            "}"
+    );
+
+    Capture<Attributes> capturedAttributes = newCapture();
+
+    ADKerberosOperationHandler handler = createMockBuilder(ADKerberosOperationHandler.class)
+        .addMockedMethod(ADKerberosOperationHandler.class.getDeclaredMethod("createInitialLdapContext", Properties.class, Control[].class))
+        .addMockedMethod(ADKerberosOperationHandler.class.getDeclaredMethod("createSearchControls"))
+        .createNiceMock();
+
+    @SuppressWarnings("unchecked")
+    NamingEnumeration<SearchResult> searchResult = createNiceMock(NamingEnumeration.class);
+    expect(searchResult.hasMore()).andReturn(false).once();
+
+    LdapContext ldapContext = createNiceMock(LdapContext.class);
+    expect(ldapContext.search(anyObject(String.class), anyObject(String.class), anyObject(SearchControls.class)))
+        .andReturn(searchResult)
+        .once();
+
+    expect(ldapContext.createSubcontext(anyObject(Name.class), capture(capturedAttributes)))
+        .andReturn(createNiceMock(DirContext.class))
+        .once();
+
+    expect(handler.createInitialLdapContext(anyObject(Properties.class), anyObject(Control[].class)))
+        .andReturn(ldapContext)
+        .once();
+
+    expect(handler.createSearchControls()).andAnswer(new IAnswer<SearchControls>() {
+      @Override
+      public SearchControls answer() throws Throwable {
+        SearchControls searchControls = createNiceMock(SearchControls.class);
+        replay(searchControls);
+        return searchControls;
+      }
+    }).once();
+
+    replayAll();
+
+    handler.open(kc, DEFAULT_REALM, kerberosEnvMap);
+    handler.createPrincipal("nn/c6501.ambari.apache.org", "secret", true);
+    handler.close();
+
+    Attributes attributes = capturedAttributes.getValue();
+
+    Assert.assertNotNull(attributes);
 
+    Assert.assertEquals("995e1580db28198e7fda1417ab5d894c877937d2", attributes.get("principal_digest").get());
+    Assert.assertEquals("b65bc066d11ac8b1beb31dc84035d9c204736f823decf8dfedda05a30e4ae410", attributes.get("principal_digest_256").get());
+    Assert.assertEquals("f48de28bc0467d764f5b04dbf04d35ff329a80277614be35eda0d0deed7f1c074cc5b0e0dc361130fdb078e09eb0ca545b9c653388192508ef382af89bd3a80c", attributes.get("principal_digest_512").get());
   }
 
   /**


[11/51] [abbrv] ambari git commit: Added info for issues@ambari.apache.org and reviews@ambari.apache.org. (yusaku)

Posted by jl...@apache.org.
Added info for issues@ambari.apache.org and reviews@ambari.apache.org. (yusaku)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/be7bafec
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/be7bafec
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/be7bafec

Branch: refs/heads/AMBARI-13364
Commit: be7bafec27e8e9b77e24b174292f303e3759ed62
Parents: 7e81d37
Author: Yusaku Sako <yu...@hortonworks.com>
Authored: Tue Mar 8 00:17:51 2016 -0800
Committer: Yusaku Sako <yu...@hortonworks.com>
Committed: Tue Mar 8 00:18:19 2016 -0800

----------------------------------------------------------------------
 docs/pom.xml | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/be7bafec/docs/pom.xml
----------------------------------------------------------------------
diff --git a/docs/pom.xml b/docs/pom.xml
index 9cb2ad3..b60652d 100644
--- a/docs/pom.xml
+++ b/docs/pom.xml
@@ -86,6 +86,24 @@
             <archive>http://mail-archives.apache.org/mod_mbox/ambari-dev/</archive>
         </mailingList>
         <mailingList>
+            <name>JIRA list</name>
+            <subscribe>mailto:issues-subscribe@ambari.apache.org
+            </subscribe>
+            <unsubscribe>mailto:issues-unsubscribe@ambari.apache.org
+            </unsubscribe>
+            <post>mailto:issues@ambari.apache.org</post>
+            <archive>http://mail-archives.apache.org/mod_mbox/ambari-issues/</archive>
+        </mailingList>
+        <mailingList>
+            <name>Review Board list</name>
+            <subscribe>mailto:reviews-subscribe@ambari.apache.org
+            </subscribe>
+            <unsubscribe>mailto:reviews-unsubscribe@ambari.apache.org
+            </unsubscribe>
+            <post>mailto:reviews@ambari.apache.org</post>
+            <archive>http://mail-archives.apache.org/mod_mbox/ambari-reviews/</archive>
+        </mailingList>
+        <mailingList>
             <name>Commit list</name>
             <subscribe>mailto:commits-subscribe@ambari.apache.org
             </subscribe>


[06/51] [abbrv] ambari git commit: AMBARI-15325. On Grafana config change Ambari should not ask to restart Collector. (swagle)

Posted by jl...@apache.org.
AMBARI-15325. On Grafana config change Ambari should not ask to restart Collector. (swagle)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/283d8167
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/283d8167
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/283d8167

Branch: refs/heads/AMBARI-13364
Commit: 283d8167da8daf4c4895610e08de2e95f0bf2b74
Parents: bf186cf
Author: Siddharth Wagle <sw...@hortonworks.com>
Authored: Mon Mar 7 16:03:29 2016 -0800
Committer: Siddharth Wagle <sw...@hortonworks.com>
Committed: Mon Mar 7 16:03:29 2016 -0800

----------------------------------------------------------------------
 .../AMBARI_METRICS/0.1.0/metainfo.xml           | 28 ++++++++++++--------
 1 file changed, 17 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/283d8167/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
index c9d10e3..b06f55b 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/metainfo.xml
@@ -47,7 +47,19 @@
             <scriptType>PYTHON</scriptType>
             <timeout>1200</timeout>
           </commandScript>
+          <configuration-dependencies>
+            <config-type>ams-site</config-type>
+            <config-type>ams-log4j</config-type>
+            <config-type>ams-hbase-policy</config-type>
+            <config-type>ams-hbase-site</config-type>
+            <config-type>ams-hbase-security-site</config-type>
+            <config-type>ams-hbase-env</config-type>
+            <config-type>ams-hbase-log4j</config-type>
+            <config-type>ams-ssl-server</config-type>
+            <config-type>ams-ssl-client</config-type>
+          </configuration-dependencies>
         </component>
+
         <component>
           <name>METRICS_MONITOR</name>
           <displayName>Metrics Monitor</displayName>
@@ -63,6 +75,7 @@
             <timeout>1200</timeout>
           </commandScript>
         </component>
+
         <component>
           <name>METRICS_GRAFANA</name>
           <displayName>Grafana</displayName>
@@ -74,6 +87,10 @@
             <scriptType>PYTHON</scriptType>
             <timeout>1200</timeout>
           </commandScript>
+          <configuration-dependencies>
+            <config-type>ams-grafana-env</config-type>
+            <config-type>ams-grafana-ini</config-type>
+          </configuration-dependencies>
         </component>
       </components>
 
@@ -146,18 +163,7 @@
       </requiredServices>
 
       <configuration-dependencies>
-        <config-type>ams-site</config-type>
-        <config-type>ams-log4j</config-type>
         <config-type>ams-env</config-type>
-        <config-type>ams-hbase-policy</config-type>
-        <config-type>ams-hbase-site</config-type>
-        <config-type>ams-hbase-security-site</config-type>
-        <config-type>ams-hbase-env</config-type>
-        <config-type>ams-hbase-log4j</config-type>
-        <config-type>ams-grafana-env</config-type>
-        <config-type>ams-grafana-ini</config-type>
-        <config-type>ams-ssl-server</config-type>
-        <config-type>ams-ssl-client</config-type>
       </configuration-dependencies>
 
       <excluded-config-types>

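The fix scopes configuration-dependencies to the component that actually consumes
each config type, so a Grafana-only change no longer flags the Collector. A toy
Python sketch of the resulting restart logic (illustrative only, not Ambari's
actual implementation; the dependency sets are abridged from the metainfo above):

component_config_deps = {
    "METRICS_COLLECTOR": {"ams-site", "ams-hbase-site", "ams-hbase-env"},
    "METRICS_MONITOR": set(),
    "METRICS_GRAFANA": {"ams-grafana-env", "ams-grafana-ini"},
}
service_config_deps = {"ams-env"}  # still declared once, service-wide

def components_needing_restart(changed_config_type):
    # Service-level dependencies flag every component; component-level
    # dependencies flag only the components that declare them.
    if changed_config_type in service_config_deps:
        return set(component_config_deps)
    return set(name for name, deps in component_config_deps.items()
               if changed_config_type in deps)

print(components_needing_restart("ams-grafana-ini"))  # only METRICS_GRAFANA
print(components_needing_restart("ams-env"))          # all three components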

[42/51] [abbrv] ambari git commit: AMBARI-15223:Add ability to display messages on Upgrade UI in paragraphs - remove dead comment (dili)

Posted by jl...@apache.org.
AMBARI-15223:Add ability to display messages on Upgrade UI in paragraphs - remove dead comment (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f7711af8
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f7711af8
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f7711af8

Branch: refs/heads/AMBARI-13364
Commit: f7711af89df6db4d4a304993c670faf577640351
Parents: cbc3f1a
Author: Di Li <di...@apache.org>
Authored: Wed Mar 9 16:22:03 2016 -0500
Committer: Di Li <di...@apache.org>
Committed: Wed Mar 9 16:22:03 2016 -0500

----------------------------------------------------------------------
 .../org/apache/ambari/server/state/stack/upgrade/ManualTask.java  | 3 ---
 1 file changed, 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f7711af8/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java
index e56a602..0a1907f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ManualTask.java
@@ -46,9 +46,6 @@ public class ManualTask extends ServerSideActionTask {
   @XmlTransient
   public String structuredOut = null;
 
-  /*@XmlElement(name="message")
-  public String message;*/
-
   @XmlElement(name="message")
   public List<String> messages;
 


[51/51] [abbrv] ambari git commit: AMBARI-15171: Parameterize distro-specific stack information for SLIDER (Juanjo Marron via dili)

Posted by jl...@apache.org.
AMBARI-15171: Parameterize distro-specific stack information for SLIDER (Juanjo Marron via dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7d862f58
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7d862f58
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7d862f58

Branch: refs/heads/AMBARI-13364
Commit: 7d862f588ebf85fc77b24b4eab9ed4a4bc6bf99a
Parents: b68758b
Author: Di Li <di...@apache.org>
Authored: Mon Feb 29 10:36:01 2016 -0500
Committer: Jayush Luniya <jl...@hortonworks.com>
Committed: Wed Mar 9 15:43:24 2016 -0800

----------------------------------------------------------------------
 .../SLIDER/0.60.0.2.2/package/scripts/params_linux.py     | 10 +++++++---
 .../SLIDER/0.60.0.2.2/package/scripts/service_check.py    |  4 ++--
 .../SLIDER/0.60.0.2.2/package/scripts/slider.py           |  2 +-
 .../SLIDER/0.60.0.2.2/package/scripts/slider_client.py    |  5 +++--
 4 files changed, 13 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/7d862f58/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
index b1cec11..be6d5a7 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/params_linux.py
@@ -27,15 +27,19 @@ from resource_management.libraries.functions import get_kinit_path
 # server configurations
 config = Script.get_config()
 
-slider_home_dir = '/usr/hdp/current/slider-client'
+stack_dir = config['configurations']['cluster-env']['stack_dir']
+stack_version_ru_support = config['configurations']['cluster-env']['stack_version_ru_support']
+
+slider_home_dir = format('{stack_dir}/current/slider-client')
 
 #hadoop params
 slider_bin_dir = "/usr/lib/slider/bin"
-if Script.is_stack_greater_or_equal("2.2"):
+
+if Script.is_stack_greater_or_equal(stack_version_ru_support):
     slider_bin_dir = format('{slider_home_dir}/bin')
 
 slider_conf_dir = format("{slider_home_dir}/conf")
-storm_slider_conf_dir = '/usr/hdp/current/storm-slider-client/conf'
+storm_slider_conf_dir = format('{stack_dir}/current/storm-slider-client/conf')
 
 slider_lib_dir = format('{slider_home_dir}/lib')
 slider_tar_gz = format('{slider_lib_dir}/slider.tar.gz')

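The parameterization above replaces the hard-coded /usr/hdp prefix with a
cluster-env lookup. A minimal sketch of the substitution, assuming stack_dir
resolves to /usr/hdp on an HDP install; Ambari's resource_management format()
pulls {names} from the surrounding scope, and plain str.format() with explicit
values behaves the same way here:

stack_dir = "/usr/hdp"  # assumed value of cluster-env: stack_dir

slider_home_dir = "{stack_dir}/current/slider-client".format(stack_dir=stack_dir)
storm_slider_conf_dir = "{stack_dir}/current/storm-slider-client/conf".format(
    stack_dir=stack_dir)

print(slider_home_dir)        # /usr/hdp/current/slider-client
print(storm_slider_conf_dir)  # /usr/hdp/current/storm-slider-client/conf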
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d862f58/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
index a16d0e7..344f0f1 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/service_check.py
@@ -37,8 +37,8 @@ class SliderServiceCheck(Script):
   def service_check(self, env):
     import params
     env.set_params(params)
-    
-    if Script.is_stack_greater_or_equal("2.2"):
+
+    if Script.is_stack_greater_or_equal(params.stack_version_ru_support):
       copy_to_hdfs("slider", params.user_group, params.hdfs_user, host_sys_prepped=params.host_sys_prepped)
     
     smokeuser_kinit_cmd = format(

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d862f58/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py
index f090583..46e5ab6 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider.py
@@ -81,7 +81,7 @@ def slider():
     File(format("{params.slider_conf_dir}/log4j.properties"),
          mode=0644
     )
-  if Script.is_stack_greater_or_equal("2.2"):
+  if Script.is_stack_greater_or_equal(params.stack_version_ru_support):
     File(params.slider_tar_gz,
          owner=params.hdfs_user,
          group=params.user_group,

http://git-wip-us.apache.org/repos/asf/ambari/blob/7d862f58/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
index f584a12..3f995c8 100644
--- a/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
+++ b/ambari-server/src/main/resources/common-services/SLIDER/0.60.0.2.2/package/scripts/slider_client.py
@@ -32,13 +32,14 @@ class SliderClient(Script):
 @OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
 class SliderClientLinux(SliderClient):
   def get_stack_to_component(self):
-    return {"HDP": "slider-client"}
+    import params
+    return {params.stack_name: "slider-client"}
 
   def pre_upgrade_restart(self, env,  upgrade_type=None):
     import params
     env.set_params(params)
 
-    if params.version and compare_versions(format_stack_version(params.version), '2.2.0.0') >= 0:
+    if params.version and compare_versions(format_stack_version(params.version), params.stack_version_ru_support) >= 0:
       conf_select.select(params.stack_name, "slider", params.version)
       stack_select.select("slider-client", params.version)
 

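Across all four scripts, the hard-coded "2.2"/"2.2.0.0" comparisons become
comparisons against cluster-env's stack_version_ru_support. A simplified
stand-in for the version gate (Ambari's compare_versions and
is_stack_greater_or_equal handle richer version strings; dotted-integer
tuples are enough to show the control flow):

def compare_versions(a, b):
    # Returns -1, 0, or 1, like the real helper.
    pa = tuple(int(x) for x in a.split("."))
    pb = tuple(int(x) for x in b.split("."))
    return (pa > pb) - (pa < pb)

stack_version_ru_support = "2.2"  # read from cluster-env, no longer hard-coded

for current in ("2.1.0.0", "2.2.0.0", "2.6.0.0"):
    if compare_versions(current, stack_version_ru_support) >= 0:
        print(current + ": use {slider_home_dir}/bin and copy slider.tar.gz")
    else:
        print(current + ": fall back to /usr/lib/slider/bin")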

[34/51] [abbrv] ambari git commit: AMBARI-15344 Grafana: UI edits related to Admin username + password.(ababiichuk)

Posted by jl...@apache.org.
AMBARI-15344 Grafana: UI edits related to Admin username + password.(ababiichuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/087fcffa
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/087fcffa
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/087fcffa

Branch: refs/heads/AMBARI-13364
Commit: 087fcffa91743356a172c6c652ec961826928394
Parents: 354d079
Author: ababiichuk <ab...@hortonworks.com>
Authored: Wed Mar 9 11:50:20 2016 +0200
Committer: ababiichuk <ab...@hortonworks.com>
Committed: Wed Mar 9 18:38:48 2016 +0200

----------------------------------------------------------------------
 .../AMBARI_METRICS/0.1.0/configuration/ams-grafana-env.xml       | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/087fcffa/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-grafana-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-grafana-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-grafana-env.xml
index f400da2..a863214 100644
--- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-grafana-env.xml
+++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/configuration/ams-grafana-env.xml
@@ -40,7 +40,7 @@
   <property>
     <name>metrics_grafana_username</name>
     <value>admin</value>
-    <display-name>Metrics Grafana Admin Username</display-name>
+    <display-name>Grafana Admin Username</display-name>
     <description>
       Metrics Grafana Username. This value cannot be modified by Ambari
       except on initial install. Please make sure the username change in
@@ -55,7 +55,7 @@
     <name>metrics_grafana_password</name>
     <value></value>
     <property-type>PASSWORD</property-type>
-    <display-name>Metrics Grafana Admin Password</display-name>
+    <display-name>Grafana Admin Password</display-name>
     <description>
       Metrics Grafana password. This value cannot be modified by Ambari
       except on initial install. Please make sure the password change in


[36/51] [abbrv] ambari git commit: AMBARI-15339: Align PXF Alert description with other services (jaoki via bhuvnesh2703)

Posted by jl...@apache.org.
AMBARI-15339: Align PXF Alert description with other services (jaoki via bhuvnesh2703)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/f5bd0585
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/f5bd0585
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/f5bd0585

Branch: refs/heads/AMBARI-13364
Commit: f5bd0585bfcd6e22a99e60a1304501d938a4361b
Parents: 20de17a
Author: Bhuvnesh Chaudhary <bc...@pivotal.io>
Authored: Wed Mar 9 09:12:27 2016 -0800
Committer: Bhuvnesh Chaudhary <bc...@pivotal.io>
Committed: Wed Mar 9 09:12:27 2016 -0800

----------------------------------------------------------------------
 .../src/main/resources/common-services/PXF/3.0.0/alerts.json     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/f5bd0585/ambari-server/src/main/resources/common-services/PXF/3.0.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/PXF/3.0.0/alerts.json b/ambari-server/src/main/resources/common-services/PXF/3.0.0/alerts.json
index d32fc97..015b13e 100644
--- a/ambari-server/src/main/resources/common-services/PXF/3.0.0/alerts.json
+++ b/ambari-server/src/main/resources/common-services/PXF/3.0.0/alerts.json
@@ -3,8 +3,8 @@
     "PXF": [
       {
         "name": "pxf_process",
-        "label": "This alert is triggered when the PXF is not functional.",
-        "description": "This alert is triggered when the PXF is not functional.",
+        "label": "PXF Process",
+        "description": "This host-level alert is triggered if the PXF process cannot be established to be up and listening on the network.",
         "interval": 1,
         "scope": "ANY",
         "enabled": true,

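The reworded description reflects what a host-level process alert actually
checks. A rough Python analogue (not Ambari's alert framework; the port is an
assumption here, since PXF's real port comes from its service configuration):

import socket

def check_process_listening(host="localhost", port=51200, timeout=5.0):
    # Returns (state, message) in the spirit of OK/CRITICAL alert results.
    try:
        sock = socket.create_connection((host, port), timeout=timeout)
        sock.close()
        return "OK", "TCP connection to %s:%d succeeded" % (host, port)
    except socket.error as exc:
        return "CRITICAL", "Cannot connect to %s:%d (%s)" % (host, port, exc)

print(check_process_listening())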

[02/51] [abbrv] ambari git commit: AMBARI-15305. Move Hive Server Interactive related files from HDP 2.4 stack to HDP 2.6 (Swapan Shridhar via alejandro)

Posted by jl...@apache.org.
AMBARI-15305. Move Hive Server Interactive related files from HDP 2.4 stack to HDP 2.6 (Swapan Shridhar via alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/037d9338
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/037d9338
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/037d9338

Branch: refs/heads/AMBARI-13364
Commit: 037d93385d8872587c568b5793b3723ee4a7050f
Parents: 7cc897b
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Mon Mar 7 12:54:40 2016 -0800
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Mon Mar 7 12:54:40 2016 -0800

----------------------------------------------------------------------
 .../configuration/hive-interactive-site.xml     | 2053 ------------------
 .../HIVE/configuration/llap-daemon-log4j.xml    |  126 --
 .../services/HIVE/configuration/llap-env.xml    |   72 -
 .../stacks/HDP/2.4/services/HIVE/metainfo.xml   |   93 +-
 .../HDP/2.4/services/HIVE/themes/theme.json     |   76 -
 .../configuration/hive-interactive-site.xml     | 2053 ++++++++++++++++++
 .../HIVE/configuration/llap-daemon-log4j.xml    |  126 ++
 .../services/HIVE/configuration/llap-env.xml    |   72 +
 .../stacks/HDP/2.6/services/HIVE/metainfo.xml   |   93 +-
 .../HDP/2.6/services/HIVE/themes/theme.json     |   76 +
 10 files changed, 2420 insertions(+), 2420 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/hive-interactive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/hive-interactive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/hive-interactive-site.xml
deleted file mode 100644
index e77b379..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/hive-interactive-site.xml
+++ /dev/null
@@ -1,2053 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration supports_final="true">
-
-  <property>
-    <name>hive.cbo.enable</name>
-    <value>true</value>
-    <description>Flag to control enabling Cost Based Optimizations using Calcite framework.</description>
-    <display-name>Enable Cost Based Optimizer</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>On</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Off</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.zookeeper.quorum</name>
-    <value>localhost:2181</value>
-    <description>List of ZooKeeper servers to talk to. This is needed for: 1.
-      Read/write locks - when hive.lock.manager is set to
-      org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager,
-      2. When HiveServer2 supports service discovery via Zookeeper.</description>
-    <value-attributes>
-      <type>multiLine</type>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.metastore.connect.retries</name>
-    <value>24</value>
-    <description>Number of retries while opening a connection to metastore</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.failure.retries</name>
-    <value>24</value>
-    <description>Number of retries upon failure of Thrift metastore calls</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.connect.retry.delay</name>
-    <value>5s</value>
-    <description>
-      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
-      Number of seconds for the client to wait between consecutive connection attempts
-    </description>
-  </property>
-
- <property>
-    <name>hive.heapsize</name>
-    <value>1024</value>
-    <display-name>HiveServer2 heap size</display-name>
-    <deleted>true</deleted>
-    <description>Hive Java heap size</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <display-name>Database Name</display-name>
-    <description>Database name used as the Hive Metastore</description>
-    <value-attributes>
-      <type>database</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
-    <display-name>Database URL</display-name>
-    <description>JDBC connect string for a JDBC metastore</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <display-name>JDBC Driver Class</display-name>
-    <description>Driver class name for a JDBC metastore</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <name>hive_database</name>
-        <type>hive-env</type>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <display-name>Database Username</display-name>
-    <description>username to use against metastore database</description>
-    <value-attributes>
-      <type>db_user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-
-  <property require-input="true">
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value></value>
-    <property-type>PASSWORD</property-type>
-    <display-name>Database Password</display-name>
-    <description>password to use against metastore database</description>
-    <value-attributes>
-      <type>password</type>
-      <overridable>false</overridable>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc:mysql://localhost/hive?createDatabaseIfNotExist=true</value>
-    <display-name>Database URL</display-name>
-    <description>JDBC connect string for a JDBC metastore</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <name>hive_database</name>
-        <type>hive-env</type>
-      </property>
-      <property>
-        <name>ambari.hive.db.schema.name</name>
-        <type>hive-site</type>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.metastore.server.max.threads</name>
-    <value>100000</value>
-    <description>Maximum number of worker threads in the Thrift server's pool.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value>false</value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-      Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value>/etc/security/keytabs/hive.service.keytab</value>
-    <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value>hive/_HOST@EXAMPLE.COM</value>
-    <description>
-      The service principal for the metastore Thrift server.
-      The special string _HOST will be replaced automatically with the correct host name.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
-    <value>/hive/cluster/delegation</value>
-    <description>The root path for token store data.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.pre.event.listeners</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener</value>
-    <description>List of comma separated listeners for metastore events.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.authorization.storage.checks</name>
-    <value>false</value>
-    <description>
-      Should the metastore do authorization checks against the underlying storage (usually hdfs)
-      for operations like drop-partition (disallow the drop-partition if the user in
-      question doesn't have permissions to delete the corresponding directory
-      on the storage).
-    </description>
-  </property>
-
-  <property>
-    <name>datanucleus.autoCreateSchema</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>datanucleus.fixedDatastore</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>1800s</value>
-    <description>
-      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
-      MetaStore Client socket timeout in seconds
-    </description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In insecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the Hive client authorization</description>
-    <display-name>Enable Authorization</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_security_authorization</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory</value>
-    <description>
-      The Hive client authorization manager class name. The user defined authorization class should implement
-      interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.
-    </description>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_security_authorization</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.cluster.delegation.token.store.class</name>
-    <value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
-    <description>The delegation token store implementation.
-      Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
-  </property>
-
-  <property>
-    <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
-    <value>localhost:2181</value>
-    <description>The ZooKeeper token store connect string.</description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.auth.reads</name>
-    <value>true</value>
-    <description>If this is true, metastore authorizer authorizes read actions on database, table</description>
-  </property>
-
-  <property>
-    <name>hive.server2.logging.operation.log.location</name>
-    <value>/tmp/hive/operation_logs</value>
-    <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
-  </property>
-
-  <property>
-    <name>hive.server2.logging.operation.enabled</name>
-    <value>true</value>
-    <description>When true, HS2 will save operation logs</description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
-    <description>
-      authenticator manager class name to be used in the metastore for authentication.
-      The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.manager</name>
-    <display-name>Hive Authorization Manager</display-name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>
-      authorization manager class name to be used in the metastore for authorization.
-      The user defined authorization class should implement interface
-      org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.
-    </description>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_security_authorization</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>
-      hive client authenticator manager class name. The user defined authenticator should implement
-      interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.
-    </description>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_security_authorization</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-    <description>
-      Setting this property to true will have HiveServer2 execute
-      Hive operations as the user making the calls to it.
-    </description>
-    <display-name>Run as end user instead of Hive user</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_security_authorization</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.user.install.directory</name>
-    <value>/user/</value>
-    <description>
-      If hive (in tez mode only) cannot find a usable hive jar in "hive.jar.directory",
-      it will upload the hive jar to "hive.user.install.directory/user.name"
-      and use it to run queries.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.conf.restricted.list</name>
-    <value>hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role</value>
-    <description>Comma separated list of configuration options which are immutable at runtime</description>
-  </property>
-
-  <property>
-    <name>hive.server2.use.SSL</name>
-    <value>false</value>
-    <description/>
-    <display-name>Use SSL</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.server2.table.type.mapping</name>
-    <value>CLASSIC</value>
-    <description>
-      Expects one of [classic, hive].
-      This setting reflects how HiveServer2 will report the table types for JDBC and other
-      client implementations that retrieve the available tables and supported table types
-      HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW
-      CLASSIC : More generic types like TABLE and VIEW
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.impersonation</name>
-    <value>true</value>
-    <deleted>true</deleted>
-    <description>Enable user impersonation for HiveServer2</description>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable HDFS filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-    <description>Disable local filesystem cache.</description>
-  </property>
-
-  <property>
-    <name>hive.exec.scratchdir</name>
-    <value>/tmp/hive</value>
-    <description>HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/&lt;username&gt; is created, with ${hive.scratch.dir.permission}.</description>
-  </property>
-
-  <property>
-    <name>hive.exec.submit.local.task.via.child</name>
-    <value>true</value>
-    <description>
-      Determines whether local tasks (typically mapjoin hashtable generation phase) runs in
-      separate JVM (true recommended) or not.
-      Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.exec.compress.intermediate</name>
-    <value>false</value>
-    <description>
-      This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
-      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
-    </description>
-  </property>
-
-  <property>
-    <name>hive.exec.reducers.bytes.per.reducer</name>
-    <value>67108864</value>
-    <description>Defines the size per reducer. For example, if it is set to 64M, given 256M input size, 4 reducers will be used.</description>
-    <display-name>Data per Reducer</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>64</minimum>
-      <maximum>4294967296</maximum>
-      <unit>B</unit>
-      <step-increment></step-increment>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.exec.reducers.max</name>
-    <value>1009</value>
-    <description>
-      Maximum number of reducers that will be used. If the value specified in the configuration parameter mapred.reduce.tasks is
-      negative, Hive will use this as the maximum number of reducers when automatically determining the number of reducers.
-    </description>
-  </property>
-
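
(Aside, not part of the patch: the two reducer properties above bound reducer
parallelism as their descriptions state; a worked example in Python, with
64M per reducer turning 256M of input into 4 reducers.)

def estimate_reducers(input_bytes, bytes_per_reducer=67108864, reducers_max=1009):
    # ceil(input / bytes_per_reducer), clamped to [1, reducers_max]
    wanted = (input_bytes + bytes_per_reducer - 1) // bytes_per_reducer
    return min(reducers_max, max(1, wanted))

print(estimate_reducers(256 * 1024 * 1024))  # 4 (256M input, 64M per reducer)
print(estimate_reducers(10 * 1024 ** 4))     # 1009, capped by hive.exec.reducers.max
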
-  <property>
-    <name>hive.exec.compress.output</name>
-    <value>false</value>
-    <description>
-      This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed.
-      The compression codec and other options are determined from Hadoop config variables mapred.output.compress*
-    </description>
-  </property>
-
-  <property>
-    <name>hive.exec.submitviachild</name>
-    <value>false</value>
-    <description/>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-    <display-name>Enforce bucketing</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_txn_acid</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sortmergebucketmapjoin</name>
-    <value>true</value>
-    <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not?</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries</description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.optimized.hashtable</name>
-    <value>true</value>
-    <description>
-      Whether Hive should use memory-optimized hash table for MapJoin. Only works on Tez,
-      because memory-optimized hashtable cannot be serialized.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.smbjoin.cache.rows</name>
-    <value>10000</value>
-    <description>How many rows with the same key value should be cached in memory per smb joined table.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr.hash.percentmemory</name>
-    <value>0.5</value>
-    <description>Portion of total memory to be used by map-side group aggregation hash table</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
-    <value>0.9</value>
-    <description>
-      The max memory to be used by map-side group aggregation hash table.
-      If the memory usage is higher than this number, force to flush data
-    </description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr.hash.min.reduction</name>
-    <value>0.5</value>
-    <description>
-      Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number.
-      Set to 1 to make sure hash aggregation is never turned off.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.merge.mapfiles</name>
-    <value>true</value>
-    <description>Merge small files at the end of a map-only job</description>
-  </property>
-
-  <property>
-    <name>hive.merge.mapredfiles</name>
-    <value>false</value>
-    <description>Merge small files at the end of a map-reduce job</description>
-  </property>
-
-  <property>
-    <name>hive.merge.tezfiles</name>
-    <value>false</value>
-    <description>Merge small files at the end of a Tez DAG</description>
-  </property>
-
-  <property>
-    <name>hive.merge.size.per.task</name>
-    <value>256000000</value>
-    <description>Size of merged files at the end of the job</description>
-  </property>
-
-  <property>
-    <name>hive.merge.smallfiles.avgsize</name>
-    <value>16000000</value>
-    <description>
-      When the average output file size of a job is less than this number, Hive will start an additional
-      map-reduce job to merge the output files into bigger files. This is only done for map-only jobs
-      if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.merge.rcfile.block.level</name>
-    <value>true</value>
-    <description/>
-  </property>
-
-  <property>
-    <name>hive.merge.orcfile.stripe.level</name>
-    <value>true</value>
-    <description>
-      When hive.merge.mapfiles or hive.merge.mapredfiles is enabled while writing a
-      table with ORC file format, enabling this config will do stripe level fast merge
-      for small ORC files. Note that enabling this config will not honor padding tolerance
-      config (hive.exec.orc.block.padding.tolerance).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.exec.orc.default.stripe.size</name>
-    <value>67108864</value>
-    <description>Define the default ORC stripe size</description>
-    <display-name>Default ORC Stripe Size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>8388608</minimum>
-      <maximum>268435456</maximum>
-      <unit>B</unit>
-      <increment-step>8388608</increment-step>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-    <description>Whether to try bucket mapjoin</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>false</value>
-    <description>If the tables being joined are sorted and bucketized on the join columns, and they have the same number
-      of buckets, a sort-merge join can be performed by setting this parameter to true.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on. </description>
-  </property>
-
-  <property>
-    <name>hive.exec.dynamic.partition</name>
-    <value>true</value>
-    <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
-  </property>
-
-  <property>
-    <name>hive.exec.dynamic.partition.mode</name>
-    <value>nonstrict</value>
-    <description>
-      In strict mode, the user must specify at least one static partition
-      in case the user accidentally overwrites all partitions.
-      NonStrict allows all partitions of a table to be dynamic.
-    </description>
-    <display-name>Allow all partitions to be Dynamic</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>nonstrict</value>
-          <label>On</label>
-        </entry>
-        <entry>
-          <value>strict</value>
-          <label>Off</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_txn_acid</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.exec.max.dynamic.partitions.pernode</name>
-    <value>2000</value>
-    <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
-  </property>
-
-  <property>
-    <name>hive.exec.max.created.files</name>
-    <value>100000</value>
-    <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
-  </property>
-
-  <property>
-    <name>hive.exec.max.dynamic.partitions</name>
-    <value>5000</value>
-    <description>Maximum number of dynamic partitions allowed to be created in total.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file size</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.constant.propagation</name>
-    <value>true</value>
-    <description>Whether to enable constant propagation optimizer</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.metadataonly</name>
-    <value>true</value>
-    <description/>
-  </property>
-
-  <property>
-    <name>hive.optimize.null.scan</name>
-    <value>true</value>
-    <description>Don't scan relations that are guaranteed not to generate any rows</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
-    <value>false</value>
-    <description>
-      If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join,
-      this parameter decides whether each table should be tried as a big table, and effectively whether a map-join
-      should be tried. That would create a conditional task with n+1 children for an n-way join (1 child for each
-      table as the big table), and the backup task will be the sort-merge join. In some cases, a map-join would be
-      faster than a sort-merge join if there is no advantage to having the output bucketed and sorted. For example,
-      if a very big sorted and bucketed table with few files (say 10 files) is being joined with a very small sorted
-      and bucketed table with few files (10 files), the sort-merge join will only use 10 mappers, and a simple
-      map-only join might be faster if the complete small table can fit in memory and a map-join can be performed.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-    <deleted>true</deleted>
-    <description>Required to Enable the conversion of an SMB (Sort-Merge-Bucket) to a map-join SMB.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>
-      Whether Hive enables the optimization about converting common join into mapjoin based on the input file size.
-      If this parameter is on, and the sum of size for n-1 of the tables/partitions for an n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>52428800</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of size for n-1 of the tables/partitions for an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task).
-    </description>
-    <display-name>For Map Join, per Map memory threshold</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>8192</minimum>
-      <maximum>17179869184</maximum>
-      <unit>B</unit>
-      <step-increment></step-increment>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-site</type>
-        <name>hive.tez.container.size</name>
-      </property>
-    </depends-on>
-  </property>
-
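
(Aside, not part of the patch: the map-join conversion rule the description
above defines, sketched in Python for an n-way join.)

def converts_to_mapjoin(table_sizes, threshold=52428800):
    # Unconditional conversion applies when the n-1 smaller inputs
    # together fit under the threshold, leaving one big streamed table.
    smaller_inputs = sorted(table_sizes)[:-1]
    return sum(smaller_inputs) <= threshold

print(converts_to_mapjoin([40 * 2**20, 8 * 2**20, 900 * 2**20]))   # True: 48M fits
print(converts_to_mapjoin([60 * 2**20, 30 * 2**20, 900 * 2**20]))  # False: 90M > 50M
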
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>4</value>
-    <description>
-      Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single-MR job.
-      The optimization will be automatically disabled if number of reducers would be less than specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.sort.dynamic.partition</name>
-    <value>false</value>
-    <description>
-      When enabled dynamic partitioning column will be globally sorted.
-      This way we can keep only one record writer open for each partition value
-      in the reducer thereby reducing the memory pressure on reducers.
-    </description>
-    <display-name>Sort Partitions Dynamically</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <deleted>true</deleted>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (e.g. a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      Size per reducer. The default is 1G, i.e. if the input size is 10G, it
-      will use 10 reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>true</value>
-    <description>
-      This flag should be set to true to enable vectorized mode of query execution.
-      The default value is false.
-    </description>
-    <display-name>Enable Vectorization and Map Vectorization</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-    <description>
-      Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again.
-      This should always be set to true. Since it is a new feature, it has been made configurable.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>Whether to enable automatic use of indexes</description>
-    <display-name>Push Filters to Storage</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.execution.engine</name>
-    <value>tez</value>
-    <description>
-      Expects one of [mr, tez].
-      Chooses execution engine. Options are: mr (Map reduce, default) or tez (hadoop 2 only)
-    </description>
-    <display-name>Execution Engine</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>mr</value>
-          <label>MapReduce</label>
-        </entry>
-        <entry>
-          <value>tez</value>
-          <label>TEZ</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.exec.post.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>
-      Comma-separated list of post-execution hooks to be invoked for each statement.
-      A post-execution hook is specified as the name of a Java class which implements the
-      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
-    </description>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_timeline_logging_enabled</name>
-      </property>
-      <property>
-        <type>application-properties</type>
-        <name>atlas.server.http.port</name>
-      </property>
-      <property>
-        <type>application-properties</type>
-        <name>atlas.server.https.port</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.exec.pre.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>
-      Comma-separated list of pre-execution hooks to be invoked for each statement.
-      A pre-execution hook is specified as the name of a Java class which implements the
-      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
-    </description>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_timeline_logging_enabled</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.exec.failure.hooks</name>
-    <value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
-    <description>
-      Comma-separated list of on-failure hooks to be invoked for each statement.
-      An on-failure hook is specified as the name of Java class which implements the
-      org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
-    </description>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_timeline_logging_enabled</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.exec.parallel</name>
-    <value>false</value>
-    <description>Whether to execute jobs in parallel</description>
-  </property>
-
-  <property>
-    <name>hive.exec.parallel.thread.number</name>
-    <value>8</value>
-    <description>How many jobs at most can be executed in parallel</description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.groupby.maxentries</name>
-    <value>100000</value>
-    <description>
-      Max number of entries in the vector group by aggregation hashtables.
-      Exceeding this will trigger a flush regardless of memory pressure.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.tez.smb.number.waves</name>
-    <value>0.5</value>
-    <description>The number of waves in which to run the SMB join. Accounts for the cluster being occupied; ideally this should be 1 wave.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.dynamic.partition.pruning.max.data.size</name>
-    <value>104857600</value>
-    <description>Maximum total data size of events in dynamic pruning.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.dynamic.partition.pruning.max.event.size</name>
-    <value>1048576</value>
-    <description>Maximum size of events sent by processors in dynamic pruning. If this size is crossed no pruning will take place.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.dynamic.partition.pruning</name>
-    <value>true</value>
-    <description>When dynamic pruning is enabled, joins on partition keys will be processed by sending events from the processing vertices to the tez application master. These events will be used to prune unnecessary partitions.</description>
-    <display-name>Allow dynamic partition pruning</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.tez.min.partition.factor</name>
-    <value>0.25</value>
-    <description>
-      When auto reducer parallelism is enabled, this factor will be used to put a lower limit on the number
-      of reducers that Tez specifies.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.tez.max.partition.factor</name>
-    <value>2.0</value>
-    <description>When auto reducer parallelism is enabled, this factor will be used to over-partition data in shuffle edges.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.auto.reducer.parallelism</name>
-    <value>false</value>
-    <description>
-      Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes
-      and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as
-      necessary.
-    </description>
-    <display-name>Allow dynamic numbers of reducers</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.convert.join.bucket.mapjoin.tez</name>
-    <value>false</value>
-    <description>
-      Whether joins can be automatically converted to bucket map joins in Hive
-      when Tez is used as the execution engine.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.prewarm.numcontainers</name>
-    <value>3</value>
-    <description>Controls the number of containers to prewarm for Tez (Hadoop 2 only)</description>
-    <display-name>Number of Containers Held</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>1</minimum>
-      <maximum>20</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.prewarm.enabled</name>
-    <value>false</value>
-    <description>Enables container prewarm for Tez (Hadoop 2 only)</description>
-    <display-name>Hold Containers to Reduce Latency</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.vectorized.groupby.checkinterval</name>
-    <value>4096</value>
-    <description>Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed.</description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.groupby.flush.percent</name>
-    <value>0.1</value>
-    <description>Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded.</description>
-  </property>
-
-  <property>
-    <name>hive.stats.autogather</name>
-    <value>true</value>
-    <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
-  </property>
-
-  <property>
-    <name>hive.stats.dbclass</name>
-    <value>fs</value>
-    <description>
-      Expects a value matching one of the patterns in [jdbc(:.*), hbase, counter, custom, fs].
-      The storage that stores temporary Hive statistics. Currently the jdbc, hbase, counter, and custom types are supported.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.stats.fetch.partition.stats</name>
-    <value>true</value>
-    <description>
-      Annotation of operator tree with statistics information requires partition level basic
-      statistics like number of rows, data size and file size. Partition statistics are fetched from
-      metastore. Fetching partition statistics for each needed partition can be expensive when the
-      number of partitions is high. This flag can be used to disable fetching of partition statistics
-      from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes
-      and will estimate the number of rows from row schema.
-    </description>
-    <display-name>Fetch partition stats at compiler</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>On</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Off</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-site</type>
-        <name>hive.cbo.enable</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.zookeeper.client.port</name>
-    <value>2181</value>
-    <description>The port of ZooKeeper servers to talk to. If the list of ZooKeeper servers specified in hive.zookeeper.quorum does not contain port numbers, this value is used.</description>
-  </property>
-
-  <property>
-    <name>hive.zookeeper.namespace</name>
-    <value>hive_zookeeper_namespace</value>
-    <description>The parent node under which all ZooKeeper nodes are created.</description>
-  </property>
-
-  <property>
-    <name>hive.stats.fetch.column.stats</name>
-    <value>false</value>
-    <description>
-      Annotation of operator tree with statistics information requires column statistics.
-      Column statistics are fetched from metastore. Fetching column statistics for each needed column
-      can be expensive when the number of columns is high. This flag can be used to disable fetching
-      of column statistics from metastore.
-    </description>
-    <display-name>Fetch column stats at compiler</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>On</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>Off</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-       <property>
-        <type>hive-site</type>
-        <name>hive.cbo.enable</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.tez.container.size</name>
-    <value>682</value>
-    <description>By default, Tez uses the Java options from map tasks. Use this property to override that value.</description>
-    <display-name>Tez Container Size</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>682</minimum>
-      <maximum>6820</maximum>
-      <unit>MB</unit>
-      <increment-step>682</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-      </property>
-      <property>
-        <type>yarn-site</type>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.tez.input.format</name>
-    <value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
-    <description>The default input format for Tez. Tez groups splits in the Application Master.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.java.opts</name>
-    <value>-server -Xmx545m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps</value>
-    <description>Java command line options for Tez. The -Xmx parameter value is generally 80% of hive.tez.container.size.</description>
-  </property>
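-
-  <!-- Illustrative note (not part of the stack defaults): per the description
-       above, -Xmx is generally sized at 80% of hive.tez.container.size. With the
-       default container size of 682 MB, 682 * 0.8 is roughly 545, hence -Xmx545m;
-       a 4096 MB container would correspondingly use about -Xmx3276m. -->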
-
-  <property>
-    <name>hive.compute.query.using.stats</name>
-    <value>true</value>
-    <description>
-      When set to true, Hive will answer a few queries (like count(1)) purely using stats
-      stored in the metastore. For basic stats collection, set hive.stats.autogather to true.
-      For more advanced stats collection, run ANALYZE TABLE queries.
-    </description>
-    <display-name>Compute simple queries using stats only</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-site</type>
-        <name>hive.cbo.enable</name>
-      </property>
-    </depends-on>
-  </property>
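-
-  <!-- Illustrative example (table name is hypothetical): with this flag on and
-       hive.stats.autogather=true, a query such as
-         SELECT COUNT(*) FROM web_logs;
-       can be answered directly from metastore statistics without launching a job,
-       while adding a WHERE clause still requires normal execution. -->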
-
-  <property>
-    <name>hive.exec.orc.default.compress</name>
-    <value>ZLIB</value>
-    <description>Define the default compression codec for ORC files</description>
-    <display-name>ORC Compression Algorithm</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>ZLIB</value>
-          <label>zlib Compression Library</label>
-        </entry>
-        <entry>
-          <value>SNAPPY</value>
-          <label>Snappy Compression Library</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
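-
-  <!-- Illustrative example (table and column are hypothetical): this default can
-       be overridden per table at creation time, e.g.
-         CREATE TABLE t (id INT) STORED AS ORC TBLPROPERTIES ("orc.compress"="SNAPPY");
-  -->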
-
-  <property>
-    <name>hive.orc.splits.include.file.footer</name>
-    <value>false</value>
-    <description>
-      If turned on, splits generated by ORC will include metadata about the stripes in the file. This
-      data is read remotely (from the client or HS2 machine) and sent to all the tasks.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.orc.compute.splits.num.threads</name>
-    <value>10</value>
-    <description>How many threads ORC should use to create splits in parallel.</description>
-  </property>
-
-  <property>
-    <name>hive.limit.optimize.enable</name>
-    <value>true</value>
-    <description>Whether to enable the optimization that tries a smaller subset of data first for simple LIMIT queries.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.cpu.vcores</name>
-    <value>-1</value>
-    <description>By default Tez will ask for however many CPUs map-reduce is configured to use per container. This can be used to override that value.</description>
-  </property>
-
-  <property>
-    <name>hive.tez.log.level</name>
-    <value>INFO</value>
-    <description>
-      The log level to use for tasks executing as part of the DAG.
-      Used only if hive.tez.java.opts is used to configure Java options.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.limit.pushdown.memory.usage</name>
-    <value>0.04</value>
-    <description>The maximum memory to be used for the hash in the ReduceSink (RS) operator for top-K selection.</description>
-  </property>
-
-  <property>
-    <name>hive.exec.orc.encoding.strategy</name>
-    <value>SPEED</value>
-    <description>
-      Define the encoding strategy to use while writing data. Changing this
-      will only affect the lightweight encoding for integers. This flag will not change
-      the compression level of the higher-level compression codec (like ZLIB). Possible
-      options are SPEED and COMPRESSION.
-    </description>
-    <display-name>ORC Encoding Strategy</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>SPEED</value>
-          <label>Speed</label>
-        </entry>
-        <entry>
-          <value>COMPRESSION</value>
-          <label>Compression</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_exec_orc_storage_strategy</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.exec.orc.compression.strategy</name>
-    <value>SPEED</value>
-    <description>
-      Define the compression strategy to use while writing data. This changes the
-      compression level of the higher-level compression codec (like ZLIB).
-    </description>
-    <display-name>ORC Compression Strategy</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>SPEED</value>
-          <label>Speed</label>
-        </entry>
-        <entry>
-          <value>COMPRESSION</value>
-          <label>Compression</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_exec_orc_storage_strategy</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.reduce.enabled</name>
-    <value>false</value>
-    <description>
-      This flag should be set to true to enable vectorized mode of the reduce-side of
-      query execution.
-    </description>
-    <display-name>Enable Reduce Vectorization</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication.ldap.baseDN</name>
-    <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <depends-on>
-      <property>
-        <type>hive-site</type>
-        <name>hive.server2.authentication</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication.kerberos.principal</name>
-    <value>hive/_HOST@EXAMPLE.COM</value>
-    <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <depends-on>
-      <property>
-        <type>hive-site</type>
-        <name>hive.server2.authentication</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.server2.custom.authentication.class</name>
-    <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <depends-on>
-      <property>
-        <type>hive-site</type>
-        <name>hive.server2.authentication</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication.kerberos.keytab</name>
-    <value>/etc/security/keytabs/hive.service.keytab</value>
-    <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <depends-on>
-      <property>
-        <type>hive-site</type>
-        <name>hive.server2.authentication</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication.ldap.url</name>
-    <value> </value>
-    <property-type>DONT_ADD_ON_UPGRADE</property-type>
-    <depends-on>
-      <property>
-        <type>hive-site</type>
-        <name>hive.server2.authentication</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.server2.tez.default.queues</name>
-    <display-name>Default query queues</display-name>
-    <value>default</value>
-    <description>
-      A comma-separated list of values corresponding to YARN queues of the same name.
-      When HiveServer2 is launched in Tez mode, this configuration needs to be set
-      for multiple Tez sessions to run in parallel on the cluster.
-    </description>
-    <value-attributes>
-      <type>combo</type>
-      <entries>
-        <entry>
-          <value>default</value>
-          <label>Default</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1+</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>capacity-scheduler</type>
-        <name>yarn.scheduler.capacity.root.queues</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.server2.tez.sessions.per.default.queue</name>
-    <value>1</value>
-    <description>
-      A positive integer that determines the number of Tez sessions that should be
-      launched on each of the queues specified by "hive.server2.tez.default.queues".
-      Determines the parallelism on each queue.
-    </description>
-    <display-name>Session per queue</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>1</minimum>
-      <maximum>10</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-  </property>
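-
-  <!-- Illustrative note: the pool size HiveServer2 maintains is roughly the
-       number of queues in hive.server2.tez.default.queues multiplied by this
-       value; for example, two queues with this set to 3 yield about 6 pooled
-       Tez sessions. -->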
-
-  <property>
-    <name>hive.server2.tez.initialize.default.sessions</name>
-    <value>false</value>
-    <description>
-      This flag is used in HiveServer2 to let a user run HiveServer2 without
-      turning on Tez for it. Such a user may want to run queries
-      over Tez without the pool of sessions.
-    </description>
-    <display-name>Start Tez session at Initialization</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.txn.manager</name>
-    <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
-    <description/>
-    <display-name>Transaction Manager</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
-          <label>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager (off)</label>
-        </entry>
-        <entry>
-          <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
-          <label>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager (on)</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_txn_acid</name>
-      </property>
-    </depends-on>
-  </property>
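-
-  <!-- Illustrative sketch (values shown are an assumption, coordinated in this
-       stack by the hive_txn_acid dependency): enabling ACID typically means
-       changing these settings together:
-         hive.txn.manager = org.apache.hadoop.hive.ql.lockmgr.DbTxnManager
-         hive.support.concurrency = true
-         hive.compactor.initiator.on = true (on one metastore instance)
-         hive.compactor.worker.threads = greater than 0
-  -->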
-
-  <property>
-    <name>hive.txn.timeout</name>
-    <value>300</value>
-    <description>Time after which transactions are declared aborted if the client has not sent a heartbeat, in seconds.</description>
-  </property>
-
-  <property>
-    <name>hive.txn.max.open.batch</name>
-    <value>1000</value>
-    <description>
-      Maximum number of transactions that can be fetched in one call to open_txns().
-      Increasing this will decrease the number of delta files created when
-      streaming data into Hive.  But it will also increase the number of
-      open transactions at any given time, possibly impacting read performance.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.cli.print.header</name>
-    <value>false</value>
-    <description>
-      Whether to print the names of the columns in query output.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.support.concurrency</name>
-    <value>false</value>
-    <description>
-      Support concurrency and use locks; needed for transactions. Requires ZooKeeper.
-    </description>
-    <display-name>Use Locking</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_txn_acid</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.compactor.initiator.on</name>
-    <value>false</value>
-    <description>Whether to run the compactor's initiator thread in this metastore instance or not. If there is more than one instance of the Thrift metastore, this should be set to true on only one instance. Setting it to true on only one host can be achieved by creating a config group containing that metastore host and overriding the default value to true in it.</description>
-    <display-name>Run Compactor</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>True</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>False</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_txn_acid</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.compactor.worker.threads</name>
-    <value>0</value>
-    <description>Number of compactor worker threads to run on this metastore instance. Can be different values on different metastore instances.</description>
-    <display-name>Number of threads used by Compactor</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-      <maximum>20</maximum>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_txn_acid</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.compactor.worker.timeout</name>
-    <value>86400L</value>
-    <description>
-      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
-      Time before a given compaction in working state is declared a failure
-      and returned to the initiated state.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.compactor.check.interval</name>
-    <value>300L</value>
-    <description>
-      Expects a time value with unit (d/day, h/hour, m/min, s/sec, ms/msec, us/usec, ns/nsec), which is sec if not specified.
-      Time between checks to see if any partitions need to be compacted.
-      This should be kept high because each check for compaction requires many calls against the NameNode.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.fetch.task.conversion</name>
-    <value>more</value>
-    <description>
-      Expects one of [none, minimal, more].
-      Some SELECT queries can be converted to a single FETCH task, minimizing latency.
-      Currently the query should be single-sourced, with no subqueries, and should not have
-      any aggregations or distincts (which incur an RS), lateral views, or joins.
-      0. none : disable hive.fetch.task.conversion
-      1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only
-      2. more    : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)
-    </description>
-  </property>
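-
-  <!-- Illustrative example (table and columns are hypothetical): with the value
-       "more", a query like
-         SELECT col1 FROM t WHERE col2 > 10 LIMIT 5;
-       runs as a single FETCH task, whereas
-         SELECT col2, COUNT(*) FROM t GROUP BY col2;
-       involves aggregation and is still planned as a regular job. -->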
-
-  <property>
-    <name>hive.fetch.task.aggr</name>
-    <value>false</value>
-    <description>
-      Aggregation queries with no group-by clause (for example, select count(*) from src) execute
-      final aggregations in a single reduce task. If this is set to true, Hive delegates the final
-      aggregation stage to a fetch task, possibly decreasing the query time.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.fetch.task.conversion.threshold</name>
-    <value>1073741824</value>
-    <description>
-      Input threshold for applying hive.fetch.task.conversion. If the target table is native, the input length
-      is calculated by summing the file lengths. If it's not native, the storage handler for the table
-      can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.compactor.delta.num.threshold</name>
-    <value>10</value>
-    <description>Number of delta files that must exist in a directory before the compactor will attempt a minor compaction.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.delta.pct.threshold</name>
-    <value>0.1f</value>
-    <description>Percentage (by size) of base that deltas can be before major compaction is initiated.</description>
-  </property>
-
-  <property>
-    <name>hive.compactor.abortedtxn.threshold</name>
-    <value>1000</value>
-    <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
-  </property>
-
-  <property>
-    <name>datanucleus.cache.level2.type</name>
-    <value>none</value>
-    <description>Determines the caching mechanism the DataNucleus L2 cache will use. It is strongly recommended to use the default value of 'none' as other values may cause consistency errors in Hive.</description>
-  </property>
-
-  <property>
-    <name>hive.server2.thrift.port</name>
-    <value>10500</value>
-    <display-name>HiveServer2 Port</display-name>
-    <description>
-      TCP port number for HiveServer2 Interactive to listen on.
-    </description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>int</type>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.server2.allow.user.substitution</name>
-    <value>true</value>
-    <description>Allow alternate user to be specified as part of HiveServer2 open connection request.</description>
-  </property>
-
-  <property>
-    <name>hive.server2.thrift.max.worker.threads</name>
-    <value>500</value>
-    <description>Maximum number of Thrift worker threads</description>
-  </property>
-
-  <property>
-    <name>hive.server2.thrift.sasl.qop</name>
-    <value>auth</value>
-    <description>
-      Expects one of [auth, auth-int, auth-conf].
-      Sasl QOP value; Set it to one of following values to enable higher levels of
-      protection for HiveServer2 communication with clients.
-      "auth" - authentication only (default)
-      "auth-int" - authentication plus integrity protection
-      "auth-conf" - authentication plus integrity and confidentiality protection
-      This is applicable only if HiveServer2 is configured to use Kerberos authentication.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication.spnego.principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <description>
-      SPNego service principal, optional,
-      typical value would look like HTTP/_HOST@EXAMPLE.COM.
-      The SPNego service principal would be used by HiveServer2 when Kerberos security is enabled
-      and HTTP transport mode is used.
-      This needs to be set only if SPNEGO is to be used in authentication.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication.spnego.keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-    <description>
-      Keytab file for the SPNego principal, optional,
-      typical value would look like /etc/security/keytabs/spnego.service.keytab.
-      This keytab would be used by HiveServer2 when Kerberos security is enabled and
-      HTTP transport mode is used.
-      This needs to be set only if SPNEGO is to be used in authentication.
-      SPNego authentication would be honored only if valid
-      hive.server2.authentication.spnego.principal
-      and
-      hive.server2.authentication.spnego.keytab
-      are specified.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.server2.authentication</name>
-    <description>Authentication mode, default NONE. Options are NONE, NOSASL, KERBEROS, LDAP, PAM and CUSTOM</description>
-    <value>NONE</value>
-    <display-name>HiveServer2 Authentication</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>NONE</value>
-          <label>None</label>
-        </entry>
-        <entry>
-          <value>LDAP</value>
-          <label>LDAP</label>
-        </entry>
-        <entry>
-          <value>KERBEROS</value>
-          <label>Kerberos</label>
-        </entry>
-        <entry>
-          <value>PAM</value>
-          <label>PAM</label>
-        </entry>
-        <entry>
-          <value>CUSTOM</value>
-          <label>Custom</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>hive.server2.support.dynamic.service.discovery</name>
-    <value>true</value>
-    <description>Whether HiveServer2 supports dynamic service discovery for its clients.
-      To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself
-      when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble (hive.zookeeper.quorum)
-      in their connection string.
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-  </property>
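-
-  <!-- Illustrative example (hostnames are hypothetical): with discovery enabled,
-       a JDBC client connects through the ZooKeeper ensemble instead of a fixed
-       HiveServer2 host, e.g.
-         jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2
-       where the namespace matches hive.server2.zookeeper.namespace below. -->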
-
-  <property>
-    <name>hive.server2.zookeeper.namespace</name>
-    <value>hiveserver2</value>
-    <description>The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery.</description>
-  </property>
-
-  <property>
-    <name>hive.server2.thrift.http.port</name>
-    <value>10501</value>
-    <description>Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'.</description>
-  </property>
-
-  <property>
-    <name>hive.server2.transport.mode</name>
-    <value>binary</value>
-    <description>
-      Expects one of [binary, http].
-      Transport mode of HiveServer2.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.default.fileformat</name>
-    <value>TextFile</value>
-    <description>Default file format for CREATE TABLE statement.</description>
-    <display-name>Default File Format</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>ORC</value>
-          <description>The Optimized Row Columnar (ORC) file format provides a highly efficient way to store Hive data. It was designed to overcome limitations of the other Hive file formats. Using ORC files improves performance when Hive is reading, writing, and processing data.</description>
-        </entry>
-        <entry>
-          <value>TextFile</value>
-          <description>Text file format saves Hive data as normal text.</description>
-        </entry>
-      </entries>
-    </value-attributes>
-  </property>
-
-  <property>
-    <name>atlas.cluster.name</name>
-    <value>primary</value>
-    <depends-on>
-      <property>
-        <type>application-properties</type>
-        <name>atlas.enableTLS</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>atlas.rest.address</name>
-    <value>http://localhost:21000</value>
-    <depends-on>
-      <property>
-        <type>application-properties</type>
-        <name>atlas.enableTLS</name>
-      </property>
-      <property>
-        <type>application-properties</type>
-        <name>atlas.server.http.port</name>
-      </property>
-      <property>
-        <type>application-properties</type>
-        <name>atlas.server.https.port</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>hive.default.fileformat.managed</name>
-    <value>TextFile</value>
-    <description>
-      Default file format for CREATE TABLE statement applied to managed tables only.
-      External tables will be created with the default file format. Leaving this null
-      will result in using the default file format for all tables.
-    </description>
-  </property>
-
-  <property>
-    <name>datanucleus.rdbms.datastoreAdapterClassName</name>
-    <description>DataNucleus adapter class. This property is used only when the Hive database is SQL Anywhere.</description>
-    <depends-on>
-      <property>
-        <type>hive-env</type>
-        <name>hive_database</name>
-      </property>
-    </depends-on>
-  </property>
-
-  <property>
-    <name>atlas.hook.hive.maxThreads</name>
-    <value>1</value>
-    <description>
-      Maximum number of threads used by the Atlas hook.
-    </description>
-  </property>
-
-  <property>
-    <name>atlas.hook.hive.minThreads</name>
-    <value>1</value>
-    <description>
-      Minimum number of threads maintained by the Atlas hook.
-    </description>
-  </property>
-
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/llap-daemon-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/llap-daemon-log4j.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/llap-daemon-log4j.xml
deleted file mode 100644
index 1c60285..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/llap-daemon-log4j.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-  <name>content</name>
-  <description>Custom log4j.properties</description>
-  <value>
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-status = WARN
-name = LlapDaemonLog4j2
-packages = org.apache.hadoop.hive.ql.log
-
-# list of properties
-property.llap.daemon.log.level = WARN
-property.llap.daemon.root.logger = console
-property.llap.daemon.log.dir = .
-property.llap.daemon.log.file = llapdaemon.log
-property.llap.daemon.historylog.file = llapdaemon_history.log
-property.llap.daemon.log.maxfilesize = 256MB
-property.llap.daemon.log.maxbackupindex = 20
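-
-# Illustrative note: the ${sys:...} references below resolve from JVM system
-# properties, falling back to the property.* defaults above, so launching the
-# daemon with, for example,
-#   -Dllap.daemon.log.level=INFO -Dllap.daemon.log.dir=/var/log/hive
-# overrides these defaults without editing this file.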
-
-# list of all appenders
-appenders = console, RFA, HISTORYAPPENDER
-
-# console appender
-appender.console.type = Console
-appender.console.name = console
-appender.console.target = SYSTEM_ERR
-appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = %d{yy/MM/dd HH:mm:ss} [%t%x] %p %c{2} : %m%n
-
-# rolling file appender
-appender.RFA.type = RollingFile
-appender.RFA.name = RFA
-appender.RFA.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}
-appender.RFA.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.log.file}_%i
-appender.RFA.layout.type = PatternLayout
-appender.RFA.layout.pattern = %d{ISO8601} %-5p [%t%x]: %c{2} (%F:%M(%L)) - %m%n
-appender.RFA.policies.type = Policies
-appender.RFA.policies.size.type = SizeBasedTriggeringPolicy
-appender.RFA.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
-appender.RFA.strategy.type = DefaultRolloverStrategy
-appender.RFA.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
-
-# history file appender
-appender.HISTORYAPPENDER.type = RollingFile
-appender.HISTORYAPPENDER.name = HISTORYAPPENDER
-appender.HISTORYAPPENDER.fileName = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}
-appender.HISTORYAPPENDER.filePattern = ${sys:llap.daemon.log.dir}/${sys:llap.daemon.historylog.file}_%i
-appender.HISTORYAPPENDER.layout.type = PatternLayout
-appender.HISTORYAPPENDER.layout.pattern = %m%n
-appender.HISTORYAPPENDER.policies.type = Policies
-appender.HISTORYAPPENDER.policies.size.type = SizeBasedTriggeringPolicy
-appender.HISTORYAPPENDER.policies.size.size = ${sys:llap.daemon.log.maxfilesize}
-appender.HISTORYAPPENDER.strategy.type = DefaultRolloverStrategy
-appender.HISTORYAPPENDER.strategy.max = ${sys:llap.daemon.log.maxbackupindex}
-
-# list of all loggers
-loggers = NIOServerCnxn, ClientCnxnSocketNIO, DataNucleus, Datastore, JPOX, HistoryLogger
-
-logger.NIOServerCnxn.name = org.apache.zookeeper.server.NIOServerCnxn
-logger.NIOServerCnxn.level = WARN
-
-logger.ClientCnxnSocketNIO.name = org.apache.zookeeper.ClientCnxnSocketNIO
-logger.ClientCnxnSocketNIO.level = WARN
-
-logger.DataNucleus.name = DataNucleus
-logger.DataNucleus.level = ERROR
-
-logger.Datastore.name = Datastore
-logger.Datastore.level = ERROR
-
-logger.JPOX.name = JPOX
-logger.JPOX.level = ERROR
-
-logger.HistoryLogger.name = org.apache.hadoop.hive.llap.daemon.HistoryLogger
-logger.HistoryLogger.level = WARN
-logger.HistoryLogger.additivity = false
-logger.HistoryLogger.appenderRefs = HistoryAppender
-logger.HistoryLogger.appenderRef.HistoryAppender.ref = HISTORYAPPENDER
-
-# root logger
-rootLogger.level = ${sys:llap.daemon.log.level}
-rootLogger.appenderRefs = root
-rootLogger.appenderRef.root.ref = ${sys:llap.daemon.root.logger}
-  </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/llap-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/llap-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/llap-env.xml
deleted file mode 100644
index 24a95cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/configuration/llap-env.xml
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<configuration supports_final="false" supports_adding_forbidden="true">
-  <property>
-    <name>enable_hive_interactive</name>
-    <value>false</value>
-    <description>Enable or disable interactive query in this cluster.</description>
-    <display-name>Enable Interactive Query</display-name>
-    <value-attributes>
-      <type>value-list</type>
-      <entries>
-        <entry>
-          <value>true</value>
-          <label>Yes</label>
-        </entry>
-        <entry>
-          <value>false</value>
-          <label>No</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-  <property>
-    <name>llap_queue_name</name>
-    <value>default</value>
-    <description>LLAP Queue Name.</description>
-    <display-name>LLAP Queue Name</display-name>
-    <value-attributes>
-      <type>combo</type>
-      <entries>
-        <entry>
-          <value>default</value>
-          <label>Default</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-  <property>
-    <name>llap_am_queue_name</name>
-    <value>default</value>
-    <description>LLAP AM Queue Name.</description>
-    <display-name>LLAP AM Queue Name</display-name>
-    <value-attributes>
-      <type>combo</type>
-      <entries>
-        <entry>
-          <value>default</value>
-          <label>Default</label>
-        </entry>
-      </entries>
-      <selection-cardinality>1</selection-cardinality>
-    </value-attributes>
-  </property>
-</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
index eb3cff6..85b669e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/metainfo.xml
@@ -21,97 +21,6 @@
     <service>
       <name>HIVE</name>
       <version>1.2.1.2.4</version>
-        <components>
-          <component>
-            <name>HIVE_SERVER_INTERACTIVE</name>
-            <displayName>HiveServer2 Interactive</displayName>
-            <category>MASTER</category>
-            <cardinality>0-1</cardinality>
-            <versionAdvertised>true</versionAdvertised>
-            <clientsToUpdateConfigs></clientsToUpdateConfigs>
-            <dependencies>
-              <dependency>
-                <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-                <scope>cluster</scope>
-                <auto-deploy>
-                  <enabled>true</enabled>
-                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                </auto-deploy>
-              </dependency>
-              <dependency>
-                <name>YARN/YARN_CLIENT</name>
-                <scope>host</scope>
-                <auto-deploy>
-                  <enabled>true</enabled>
-                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                </auto-deploy>
-              </dependency>
-              <dependency>
-                <name>HDFS/HDFS_CLIENT</name>
-                <scope>host</scope>
-                <auto-deploy>
-                  <enabled>true</enabled>
-                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                </auto-deploy>
-              </dependency>
-              <dependency>
-                <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-                <scope>host</scope>
-                <auto-deploy>
-                  <enabled>true</enabled>
-                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                </auto-deploy>
-              </dependency>
-              <dependency>
-                <name>TEZ/TEZ_CLIENT</name>
-                <scope>host</scope>
-                <auto-deploy>
-                  <enabled>true</enabled>
-                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                </auto-deploy>
-              </dependency>
-              <dependency>
-                <name>PIG/PIG</name>
-                <scope>host</scope>
-                <auto-deploy>
-                  <enabled>true</enabled>
-                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                </auto-deploy>
-              </dependency>
-              <dependency>
-                <name>SLIDER/SLIDER</name>
-                <scope>host</scope>
-                <auto-deploy>
-                  <enabled>true</enabled>
-                  <co-locate>HIVE/HIVE_SERVER_INTERACTIVE</co-locate>
-                </auto-deploy>
-              </dependency>
-            </dependencies>
-                <commandScript>
-                  <script>scripts/hive_server_interactive.py</script>
-                  <scriptType>PYTHON</scriptType>
-                </commandScript>
-                <configuration-dependencies>
-                  <config-type>hive-site</config-type>
-                  <config-type>hive-interactive-site</config-type>
-                </configuration-dependencies>
-          </component>
-        </components>
-        <themes>
-          <theme>
-            <fileName>theme.json</fileName>
-            <default>true</default>
-          </theme>
-        </themes>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-        <service>YARN</service>
-        <service>TEZ</service>
-        <service>PIG</service>
-        <service>SLIDER</service>
-      </requiredServices>
     </service>
   </services>
-</metainfo>
+</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/037d9338/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/themes/theme.json b/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/themes/theme.json
deleted file mode 100644
index 26b9532..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.4/services/HIVE/themes/theme.json
+++ /dev/null
@@ -1,76 +0,0 @@
-{
-  "configuration" : {
-    "placement" : {
-      "configs" : [
-        {
-          "config": "llap-env/enable_hive_interactive",
-          "subsection-name": "interactive-query-row1-col1"
-        },
-        {
-          "config": "llap-env/llap_queue_name",
-          "subsection-name": "interactive-query-row1-col1",
-          "depends-on": [
-            {
-              "configs":[
-                "llap-env/enable_hive_interactive"
-              ],
-              "if": "${llap-env/enable_hive_interactive}",
-              "then": {
-                "property_value_attributes": {
-                  "visible": true
-                }
-              },
-              "else": {
-                "property_value_attributes": {
-                  "visible": false
-                }
-              }
-            }
-          ]
-        },
-        {
-          "config": "llap-env/llap_am_queue_name",
-          "subsection-name": "interactive-query-row1-col1",
-          "depends-on": [
-            {
-              "configs":[
-                "llap-env/enable_hive_interactive"
-              ],
-              "if": "${llap-env/enable_hive_interactive}",
-              "then": {
-                "property_value_attributes": {
-                  "visible": true
-                }
-              },
-              "else": {
-                "property_value_attributes": {
-                  "visible": false
-                }
-              }
-            }
-          ]
-        }
-      ]
-    },
-    "widgets" : [
-      {
-        "config": "llap-env/enable_hive_interactive",
-        "widget": {
-          "type": "toggle"
-        }
-      },
-      {
-        "config": "llap-env/llap_queue_name",
-        "widget": {
-          "type": "list"
-        }
-      },
-      {
-        "config": "llap-env/llap_am_queue_name",
-        "widget": {
-          "type": "list"
-        }
-      }
-    ]
-  }
-}
\ No newline at end of file