Posted to commits@bigtop.apache.org by rv...@apache.org on 2017/03/22 06:10:43 UTC

[52/52] bigtop git commit: BIGTOP-1406. package Ambari in Bigtop

BIGTOP-1406. package Ambari in Bigtop


Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/0d3448b8
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/0d3448b8
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/0d3448b8

Branch: refs/heads/master
Commit: 0d3448b812781488010f80febcbfe6e29af8d075
Parents: bf841ad
Author: Roman Shaposhnik <rv...@apache.org>
Authored: Mon Feb 27 12:26:46 2017 -0800
Committer: Roman Shaposhnik <rv...@apache.org>
Committed: Tue Mar 21 23:08:12 2017 -0700

----------------------------------------------------------------------
 .../ODPi/1.0/blueprints/multinode-default.json  |  108 -
 .../ODPi/1.0/blueprints/singlenode-default.json |   65 -
 .../ODPi/1.0/configuration/cluster-env.xml      |  232 --
 .../src/common/ambari/ODPi/1.0/hooks/.hash      |    1 -
 .../1.0/hooks/after-INSTALL/scripts/hook.py     |   37 -
 .../1.0/hooks/after-INSTALL/scripts/params.py   |  101 -
 .../scripts/shared_initialization.py            |  108 -
 .../hooks/before-ANY/files/changeToSecureUid.sh |   53 -
 .../ODPi/1.0/hooks/before-ANY/scripts/hook.py   |   36 -
 .../ODPi/1.0/hooks/before-ANY/scripts/params.py |  230 --
 .../before-ANY/scripts/shared_initialization.py |  224 --
 .../1.0/hooks/before-INSTALL/scripts/hook.py    |   37 -
 .../1.0/hooks/before-INSTALL/scripts/params.py  |  113 -
 .../scripts/repo_initialization.py              |   68 -
 .../scripts/shared_initialization.py            |   37 -
 .../1.0/hooks/before-RESTART/scripts/hook.py    |   29 -
 .../hooks/before-START/files/checkForFormat.sh  |   65 -
 .../before-START/files/fast-hdfs-resource.jar   |  Bin 19285850 -> 0 bytes
 .../before-START/files/task-log4j.properties    |  134 -
 .../hooks/before-START/files/topology_script.py |   66 -
 .../ODPi/1.0/hooks/before-START/scripts/hook.py |   39 -
 .../1.0/hooks/before-START/scripts/params.py    |  318 --
 .../before-START/scripts/rack_awareness.py      |   47 -
 .../scripts/shared_initialization.py            |  175 -
 .../templates/commons-logging.properties.j2     |   43 -
 .../templates/exclude_hosts_list.j2             |   21 -
 .../templates/hadoop-metrics2.properties.j2     |  104 -
 .../before-START/templates/health_check.j2      |   81 -
 .../templates/include_hosts_list.j2             |   21 -
 .../templates/topology_mappings.data.j2         |   24 -
 .../src/common/ambari/ODPi/1.0/kerberos.json    |   60 -
 .../src/common/ambari/ODPi/1.0/metainfo.xml     |   22 -
 .../ODPi/1.0/properties/stack_features.json     |   51 -
 .../ambari/ODPi/1.0/properties/stack_tools.json |    4 -
 .../common/ambari/ODPi/1.0/repos/repoinfo.xml   |   33 -
 .../ambari/ODPi/1.0/role_command_order.json     |   75 -
 .../ambari/ODPi/1.0/services/HDFS/metainfo.xml  |   27 -
 .../ambari/ODPi/1.0/services/HIVE/alerts.json   |  232 --
 .../services/HIVE/configuration/hcat-env.xml    |   41 -
 .../services/HIVE/configuration/hive-env.xml    |  540 ---
 .../HIVE/configuration/hive-exec-log4j.xml      |   96 -
 .../services/HIVE/configuration/hive-log4j.xml  |  106 -
 .../services/HIVE/configuration/hive-site.xml   | 2796 --------------
 .../HIVE/configuration/hivemetastore-site.xml   |   43 -
 .../HIVE/configuration/hiveserver2-site.xml     |  122 -
 .../services/HIVE/configuration/webhcat-env.xml |   38 -
 .../HIVE/configuration/webhcat-log4j.xml        |   63 -
 .../HIVE/configuration/webhcat-site.xml         |  287 --
 .../HIVE/etc/hive-schema-0.12.0.mysql.sql       |  777 ----
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql      |  718 ----
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql    | 1406 -------
 .../HIVE/etc/hive-schema-0.13.0.mysql.sql       |  889 -----
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql      |  835 -----
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql    | 1538 --------
 .../etc/upgrade-0.12.0-to-0.13.0.oracle.sql     |  165 -
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql |   38 -
 .../ambari/ODPi/1.0/services/HIVE/kerberos.json |  132 -
 .../ambari/ODPi/1.0/services/HIVE/metainfo.xml  |  372 --
 .../alert_hive_interactive_thrift_port.py       |  216 --
 .../HIVE/package/alerts/alert_hive_metastore.py |  270 --
 .../package/alerts/alert_hive_thrift_port.py    |  274 --
 .../package/alerts/alert_llap_app_status.py     |  299 --
 .../HIVE/package/alerts/alert_webhcat_server.py |  228 --
 .../package/etc/hive-schema-0.12.0.mysql.sql    |  777 ----
 .../package/etc/hive-schema-0.12.0.oracle.sql   |  718 ----
 .../package/etc/hive-schema-0.12.0.postgres.sql | 1406 -------
 .../services/HIVE/package/files/addMysqlUser.sh |   39 -
 .../services/HIVE/package/files/hcatSmoke.sh    |   41 -
 .../services/HIVE/package/files/hiveSmoke.sh    |   24 -
 .../HIVE/package/files/hiveTezSetup.cmd         |   58 -
 .../services/HIVE/package/files/hiveserver2.sql |   23 -
 .../HIVE/package/files/hiveserver2Smoke.sh      |   32 -
 .../1.0/services/HIVE/package/files/pigSmoke.sh |   18 -
 .../HIVE/package/files/removeMysqlUser.sh       |   33 -
 .../HIVE/package/files/startMetastore.sh        |   25 -
 .../HIVE/package/files/templetonSmoke.sh        |   58 -
 .../services/HIVE/package/scripts/__init__.py   |   19 -
 .../1.0/services/HIVE/package/scripts/hcat.py   |   81 -
 .../HIVE/package/scripts/hcat_client.py         |   85 -
 .../HIVE/package/scripts/hcat_service_check.py  |   86 -
 .../1.0/services/HIVE/package/scripts/hive.py   |  481 ---
 .../HIVE/package/scripts/hive_client.py         |   68 -
 .../HIVE/package/scripts/hive_interactive.py    |  302 --
 .../HIVE/package/scripts/hive_metastore.py      |  259 --
 .../HIVE/package/scripts/hive_server.py         |  211 --
 .../package/scripts/hive_server_interactive.py  |  535 ---
 .../HIVE/package/scripts/hive_server_upgrade.py |  141 -
 .../HIVE/package/scripts/hive_service.py        |  187 -
 .../package/scripts/hive_service_interactive.py |  109 -
 .../HIVE/package/scripts/mysql_server.py        |   64 -
 .../HIVE/package/scripts/mysql_service.py       |   49 -
 .../HIVE/package/scripts/mysql_users.py         |   70 -
 .../HIVE/package/scripts/mysql_utils.py         |   35 -
 .../1.0/services/HIVE/package/scripts/params.py |   29 -
 .../HIVE/package/scripts/params_linux.py        |  735 ----
 .../HIVE/package/scripts/params_windows.py      |   74 -
 .../HIVE/package/scripts/service_check.py       |  190 -
 .../HIVE/package/scripts/setup_ranger_hive.py   |   98 -
 .../scripts/setup_ranger_hive_interactive.py    |   78 -
 .../HIVE/package/scripts/status_params.py       |  123 -
 .../services/HIVE/package/scripts/webhcat.py    |  145 -
 .../HIVE/package/scripts/webhcat_server.py      |  164 -
 .../HIVE/package/scripts/webhcat_service.py     |   96 -
 .../package/scripts/webhcat_service_check.py    |  128 -
 .../hadoop-metrics2-hivemetastore.properties.j2 |   54 -
 .../hadoop-metrics2-hiveserver2.properties.j2   |   54 -
 .../templates/hadoop-metrics2-llapdaemon.j2     |   52 -
 .../hadoop-metrics2-llaptaskscheduler.j2        |   52 -
 .../HIVE/package/templates/hive.conf.j2         |   35 -
 .../package/templates/startHiveserver2.sh.j2    |   24 -
 .../templates/startHiveserver2Interactive.sh.j2 |   24 -
 .../package/templates/templeton_smoke.pig.j2    |   24 -
 .../ODPi/1.0/services/KERBEROS/metainfo.xml     |   26 -
 .../1.0/services/YARN/MAPREDUCE2_metrics.json   | 2596 -------------
 .../ODPi/1.0/services/YARN/YARN_metrics.json    | 3486 ------------------
 .../ODPi/1.0/services/YARN/YARN_widgets.json    |  611 ---
 .../ambari/ODPi/1.0/services/YARN/alerts.json   |  418 ---
 .../YARN/configuration-mapred/mapred-env.xml    |  105 -
 .../YARN/configuration-mapred/mapred-site.xml   |  481 ---
 .../YARN/configuration/capacity-scheduler.xml   |  130 -
 .../services/YARN/configuration/yarn-env.xml    |  260 --
 .../services/YARN/configuration/yarn-log4j.xml  |   94 -
 .../services/YARN/configuration/yarn-site.xml   |  579 ---
 .../ambari/ODPi/1.0/services/YARN/kerberos.json |  214 --
 .../ambari/ODPi/1.0/services/YARN/metainfo.xml  |  317 --
 .../ambari/ODPi/1.0/services/YARN/package/.hash |    1 -
 .../package/alerts/alert_nodemanager_health.py  |  209 --
 .../alerts/alert_nodemanagers_summary.py        |  219 --
 .../files/validateYarnComponentStatusWindows.py |  161 -
 .../services/YARN/package/scripts/__init__.py   |   20 -
 .../scripts/application_timeline_server.py      |  155 -
 .../YARN/package/scripts/historyserver.py       |  190 -
 .../YARN/package/scripts/install_jars.py        |   99 -
 .../package/scripts/mapred_service_check.py     |  168 -
 .../YARN/package/scripts/mapreduce2_client.py   |   98 -
 .../YARN/package/scripts/nodemanager.py         |  161 -
 .../YARN/package/scripts/nodemanager_upgrade.py |   73 -
 .../1.0/services/YARN/package/scripts/params.py |   31 -
 .../YARN/package/scripts/params_linux.py        |  469 ---
 .../YARN/package/scripts/params_windows.py      |   59 -
 .../YARN/package/scripts/resourcemanager.py     |  289 --
 .../services/YARN/package/scripts/service.py    |  105 -
 .../YARN/package/scripts/service_check.py       |  159 -
 .../YARN/package/scripts/setup_ranger_yarn.py   |   71 -
 .../YARN/package/scripts/status_params.py       |   61 -
 .../1.0/services/YARN/package/scripts/yarn.py   |  499 ---
 .../YARN/package/scripts/yarn_client.py         |   67 -
 .../package/templates/container-executor.cfg.j2 |   40 -
 .../package/templates/exclude_hosts_list.j2     |   21 -
 .../YARN/package/templates/mapreduce.conf.j2    |   35 -
 .../package/templates/taskcontroller.cfg.j2     |   38 -
 .../YARN/package/templates/yarn.conf.j2         |   35 -
 .../ODPi/1.0/services/ZOOKEEPER/metainfo.xml    |   27 -
 .../ambari/ODPi/1.0/services/stack_advisor.py   | 1947 ----------
 .../src/common/ambari/ODPi/1.0/widgets.json     |   95 -
 .../src/common/ambari/install_ambari.sh         |    2 -
 .../src/deb/ambari/source/include-binaries      |    1 -
 bigtop.bom                                      |   10 +
 158 files changed, 10 insertions(+), 38545 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json b/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
deleted file mode 100755
index 53248e4..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/multinode-default.json
+++ /dev/null
@@ -1,108 +0,0 @@
-{
-    "configurations" : [
-    ],
-    "host_groups" : [
-        {
-            "name" : "master_1",
-            "components" : [
-                {
-                    "name" : "NAMENODE"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_2",
-            "components" : [
-
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "HISTORYSERVER"
-                },
-                {
-                    "name" : "SECONDARY_NAMENODE"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "POSTGRESQL_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_3",
-            "components" : [
-                {
-                    "name" : "RESOURCEMANAGER"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "master_4",
-            "components" : [
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        },
-        {
-            "name" : "slave",
-            "components" : [
-                {
-                    "name" : "NODEMANAGER"
-                },
-                {
-                    "name" : "DATANODE"
-                }
-            ],
-            "cardinality" : "${slavesCount}"
-        },
-        {
-            "name" : "gateway",
-            "components" : [
-                {
-                    "name" : "AMBARI_SERVER"
-                },
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "MAPREDUCE2_CLIENT"
-                }
-            ],
-            "cardinality" : "1"
-        }
-    ],
-    "Blueprints" : {
-        "blueprint_name" : "blueprint-multinode-default",
-        "stack_name" : "ODPi",
-        "stack_version" : "1.0"
-    }
-}
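
For readers following along: a stack blueprint like the one above is registered
with a running Ambari server through its Blueprints REST API before a cluster
can be built from it. A minimal sketch in Python (the server URL and the
default admin:admin credentials below are placeholder assumptions, not part of
this commit):

    # Sketch: register a blueprint JSON with an Ambari server.
    # AMBARI_URL and the credentials are placeholders for illustration.
    import base64
    import urllib.request

    AMBARI_URL = "http://ambari.example.com:8080"

    def register_blueprint(name, blueprint_path):
        with open(blueprint_path, "rb") as f:
            req = urllib.request.Request(
                "%s/api/v1/blueprints/%s" % (AMBARI_URL, name),
                data=f.read(), method="POST")
        # Ambari's REST API rejects requests that lack this header.
        req.add_header("X-Requested-By", "ambari")
        token = base64.b64encode(b"admin:admin").decode("ascii")
        req.add_header("Authorization", "Basic " + token)
        return urllib.request.urlopen(req)

    register_blueprint("blueprint-multinode-default", "multinode-default.json")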

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json b/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
deleted file mode 100755
index 6aeb516..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/blueprints/singlenode-default.json
+++ /dev/null
@@ -1,65 +0,0 @@
-{
-    "configurations" : [
-    ],
-    "host_groups" : [
-        {
-            "name" : "host_group_1",
-            "components" : [
-                {
-                    "name" : "HISTORYSERVER"
-                },
-                {
-                    "name" : "NAMENODE"
-                },
-                {
-                    "name" : "SUPERVISOR"
-                },
-                {
-                    "name" : "AMBARI_SERVER"
-                },
-                {
-                    "name" : "APP_TIMELINE_SERVER"
-                },
-                {
-                    "name" : "HDFS_CLIENT"
-                },
-                {
-                    "name" : "NODEMANAGER"
-                },
-                {
-                    "name" : "DATANODE"
-                },
-                {
-                    "name" : "RESOURCEMANAGER"
-                },
-                {
-                    "name" : "ZOOKEEPER_SERVER"
-                },
-                {
-                    "name" : "ZOOKEEPER_CLIENT"
-                },
-                {
-                    "name" : "SECONDARY_NAMENODE"
-                },
-                {
-                    "name" : "YARN_CLIENT"
-                },
-                {
-                    "name" : "MAPREDUCE2_CLIENT"
-                },
-                {
-                    "name" : "POSTGRESQL_SERVER"
-                },
-                {
-                    "name" : "DRPC_SERVER"
-                }
-            ],
-            "cardinality" : "1"
-        }
-    ],
-    "Blueprints" : {
-        "blueprint_name" : "blueprint-singlenode-default",
-        "stack_name" : "ODPi",
-        "stack_version" : "1.0"
-    }
-}
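
Once registered, a blueprint is instantiated by POSTing a cluster creation
template to /api/v1/clusters/<cluster-name>, which maps concrete hosts onto
the blueprint's host groups. A minimal template for the single-node blueprint
above might look like this (the FQDN is a placeholder):

    {
        "blueprint" : "blueprint-singlenode-default",
        "host_groups" : [
            {
                "name" : "host_group_1",
                "hosts" : [
                    { "fqdn" : "node1.example.com" }
                ]
            }
        ]
    }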

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml b/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
deleted file mode 100755
index 61274b6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/configuration/cluster-env.xml
+++ /dev/null
@@ -1,232 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>recovery_enabled</name>
-    <value>true</value>
-    <description>Whether auto start is enabled for this cluster.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_type</name>
-    <value>AUTO_START</value>
-    <description>Auto start type.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_lifetime_max_count</name>
-    <value>1024</value>
-    <description>Maximum lifetime count of auto start recovery attempts allowed per host component. This is reset when the agent is restarted.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_max_count</name>
-    <value>6</value>
-    <description>Maximum count of auto start recovery attempts allowed per host component within a window. This is reset when the agent is restarted.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_window_in_minutes</name>
-    <value>60</value>
-    <description>Auto start recovery window size in minutes.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>recovery_retry_interval</name>
-    <value>5</value>
-    <description>Gap between auto start recovery retries per host component.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <display-name>Skip group modifications during install</display-name>
-    <value>false</value>
-    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-    <description>Whether to ignore failures during user and group creation</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>smokeuser</name>
-    <display-name>Smoke User</display-name>
-    <value>ambari-qa</value>
-    <property-type>USER</property-type>
-    <description>User executing service checks</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>smokeuser_keytab</name>
-    <value>/etc/security/keytabs/smokeuser.headless.keytab</value>
-    <description>Path to smoke test user keytab file</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>user_group</name>
-    <display-name>Hadoop Group</display-name>
-    <value>hadoop</value>
-    <property-type>GROUP</property-type>
-    <description>Hadoop user group.</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>repo_suse_rhel_template</name>
-    <value>[{{repo_id}}]
-name={{repo_id}}
-{% if mirror_list %}mirrorlist={{mirror_list}}{% else %}baseurl={{base_url}}{% endif %}
-
-path=/
-enabled=1
-gpgcheck=0</value>
-    <description>Template of repositories for RHEL and SUSE.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>repo_ubuntu_template</name>
-    <value>{{package_type}} {{base_url}} {{components}}</value>
-    <description>Template of repositories for Ubuntu.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>override_uid</name>
-    <value>true</value>
-    <property-type>ADDITIONAL_USER_PROPERTY</property-type>
-    <display-name>Have Ambari manage UIDs</display-name>
-    <description>Have Ambari manage UIDs</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>fetch_nonlocal_groups</name>
-    <value>true</value>
-    <display-name>Ambari fetch nonlocal groups</display-name>
-    <description>Ambari requires fetching all the groups. This can be slow
-        in environments with LDAP enabled. Setting this option to false allows Ambari
-        to skip user/group management for LDAP groups.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>managed_hdfs_resource_property_names</name>
-    <value/>
-    <description>Comma-separated list of property names with HDFS resource paths.
-        Resources from this list will be managed even if they are marked as not managed in the stack.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_tools</name>
-    <value/>
-    <description>Stack specific tools</description>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>stack_tools.json</property-file-name>
-      <property-file-type>json</property-file-type>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
-  <property>
-    <name>stack_features</name>
-    <value/>
-    <description>List of features supported by the stack</description>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>stack_features.json</property-file-name>
-      <property-file-type>json</property-file-type>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>stack_root</name>
-    <value>/usr/odpi</value>
-    <description>Stack root folder</description>
-    <value-attributes>
-      <read-only>true</read-only>
-      <overridable>false</overridable>
-      <visible>false</visible>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>alerts_repeat_tolerance</name>
-    <value>1</value>
-    <description>The number of consecutive alerts required to transition an alert from the SOFT to the HARD state.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>ignore_bad_mounts</name>
-    <value>false</value>
-    <description>For properties handled by handle_mounted_dirs, this will prevent Ambari from creating any directories.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>manage_dirs_on_root</name>
-    <value>true</value>
-    <description>For properties handled by handle_mounted_dirs, this will make Ambari manage (create and set permissions on) unknown directories on the / partition.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>one_dir_per_partition</name>
-    <value>false</value>
-    <description>For properties handled by handle_mounted_dirs, this will make Ambari use at most one directory per partition.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
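
A note on repo_suse_rhel_template above: Ambari renders it with the cluster's
repository parameters to produce a yum .repo file. With hypothetical values
repo_id=ODPi-1.0 and base_url=http://repo.example.com/odpi/1.0 (and no mirror
list), the rendered result would be:

    [ODPi-1.0]
    name=ODPi-1.0
    baseurl=http://repo.example.com/odpi/1.0

    path=/
    enabled=1
    gpgcheck=0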

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
deleted file mode 100755
index f8c8c1f..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/.hash
+++ /dev/null
@@ -1 +0,0 @@
-18a52d08dc963523592f7f1f2997089b6655de71
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
deleted file mode 100755
index 8a583b3..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.hook import Hook
-from shared_initialization import link_configs
-from shared_initialization import setup_config
-from shared_initialization import setup_stack_symlinks
-
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_stack_symlinks()
-    setup_config()
-
-    link_configs(self.stroutfile)
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
deleted file mode 100755
index 819d8f7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,101 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
-
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-# current host stack version
-current_version = default("/hostLevelParams/current_version", None)
-
-# default hadoop params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-
-# HDP 2.2+ params
-if Script.is_stack_greater_or_equal("2.2"):
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
-  # not supported in HDP 2.2+
-  hadoop_conf_empty_dir = None
-
-versioned_stack_root = '/usr/hdp/current'
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-
-link_configs_lock_file = os.path.join(tmp_dir, "link_configs_lock_file")
-stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
-
-upgrade_suspended = default("/roleParams/upgrade_suspended", False)
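
A side note on the params pattern above: helpers like
default("/commandParams/dfs_type", "") resolve a slash-delimited path against
the command JSON that the Ambari agent passes to the script, returning the
fallback when any segment is missing. A minimal sketch of that lookup
(illustrative only; the real resource_management implementation differs in
details, e.g. it reads the config implicitly):

    # Sketch of a slash-delimited lookup in the style of default().
    def default(path, fallback, config):
        node = config
        for key in path.strip("/").split("/"):
            if not isinstance(node, dict) or key not in node:
                return fallback
            node = node[key]
        return node

    cmd = {"commandParams": {"dfs_type": "HCFS"}}
    assert default("/commandParams/dfs_type", "", cmd) == "HCFS"
    assert default("/roleParams/upgrade_suspended", False, cmd) is False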

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
deleted file mode 100755
index 9982dc6..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/after-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,108 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-import ambari_simplejson as json
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.script import Script
-
-
-def setup_stack_symlinks():
-  """
-  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
-  stack version, such as "2.3". This should always be called after a component has been
-  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
-  interact with this since it's done via a custom command and will not trigger this hook.
-  :return:
-  """
-  import params
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    # try using the exact version first, falling back to just the stack version if it's not defined,
-    # which would only be the case during an initial cluster installation
-    version = params.current_version if params.current_version is not None else params.stack_version_unformatted
-
-    if not params.upgrade_suspended:
-      # On parallel command execution this should be executed by a single process at a time.
-      with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-        stack_select.select_all(version)
-
-def setup_config():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-
-  is_hadoop_conf_dir_present = False
-  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
-    is_hadoop_conf_dir_present = True
-  else:
-    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
-
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config directory exists
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
-
-
-def load_version(struct_out_file):
-  """
-  Load the version from a file. Kept as a separate method for easier testing.
-  """
-  json_version = None
-  try:
-    if os.path.exists(struct_out_file):
-      with open(struct_out_file, 'r') as fp:
-        json_info = json.load(fp)
-        json_version = json_info['version']
-  except:
-    pass
-
-  return json_version
-  
-
-def link_configs(struct_out_file):
-  """
-  Links configs, but only on a fresh install of HDP-2.3 and higher
-  """
-  import params
-
-  if not Script.is_stack_greater_or_equal("2.3"):
-    Logger.info("Can only link configs for HDP-2.3 and higher.")
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for k, v in conf_select.get_package_dirs().iteritems():
-      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)
\ No newline at end of file
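
The FcntlBasedProcessLock used above serializes these steps when the agent
runs commands in parallel. A minimal sketch of the underlying idea (this is
not Ambari's implementation, which additionally supports disabling the lock
and tolerating fcntl failures):

    # Sketch: an exclusive inter-process lock built on fcntl.flock.
    import fcntl
    from contextlib import contextmanager

    @contextmanager
    def process_lock(lock_file):
        with open(lock_file, "a") as f:
            fcntl.flock(f, fcntl.LOCK_EX)  # blocks until the lock is free
            try:
                yield
            finally:
                fcntl.flock(f, fcntl.LOCK_UN)

    # Only one process at a time runs the critical section.
    with process_lock("/tmp/link_configs_lock_file"):
        pass  # e.g. convert conf directories to symlinks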

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
deleted file mode 100755
index 08542c4..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ -z "$newUid" ]
-then
-  echo "Failed to find an available UID between 1001 and 2000"
-  exit 1
-fi
-
-set -e
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
-exit 0
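
One caveat in the script above: grep -q $i /etc/passwd matches the candidate
number anywhere in the file, not just in the UID field, so some free UIDs can
be skipped. A stricter version of the same search, sketched in Python:

    # Sketch: find a free UID by comparing only the UID field of passwd.
    import pwd

    def find_available_uid(low=1001, high=2000):
        used = {entry.pw_uid for entry in pwd.getpwall()}
        for uid in range(low, high + 1):
            if uid not in used:
                return uid
        return None  # nothing free in the range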

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
deleted file mode 100755
index c34be0b..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-
-    setup_users()
-    if params.has_namenode or params.dfs_type == 'HCFS':
-      setup_hadoop_env()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
deleted file mode 100755
index 5544085..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/params.py
+++ /dev/null
@@ -1,230 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import collections
-import re
-import os
-
-import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.expect import expect
-from ambari_commons.os_check import OSCheck
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-jdk_location = config['hostLevelParams']['jdk_location']
-
-sudo = AMBARI_SUDO_BINARY
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-restart_type = default("/commandParams/restart_type", "")
-version = default("/commandParams/version", None)
-# Handle upgrade and downgrade
-if (restart_type.lower() == "rolling_upgrade" or restart_type.lower() == "nonrolling_upgrade") and version:
-  stack_version_formatted = format_stack_version(version)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-secure_dn_ports_are_in_use = False
-
-def get_port(address):
-  """
-  Extracts the port from an address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def is_secure_port(port):
-  """
-  Returns True if the port is root-owned (below 1024) on *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-# hadoop default params
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-
-# upgrades would cause these directories to have a version instead of "current"
-# which would cause a lot of problems when writing out hadoop-env.sh; instead
-# force the use of "current" in the hook
-hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hadoop_home = stack_select.get_hadoop_dir("home", force_latest_on_upgrade=True)
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec", force_latest_on_upgrade=True)
-
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-hadoop_secure_dn_user = hdfs_user
-hadoop_dir = "/etc/hadoop"
-versioned_stack_root = '/usr/hdp/current'
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
-is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
-
-# HDP 2.2+ params
-if Script.is_stack_greater_or_equal("2.2"):
-  mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-
-  # not supported in HDP 2.2+
-  hadoop_conf_empty_dir = None
-
-  if not security_enabled:
-    hadoop_secure_dn_user = '""'
-  else:
-    dfs_dn_port = get_port(dfs_dn_addr)
-    dfs_dn_http_port = get_port(dfs_dn_http_addr)
-    dfs_dn_https_port = get_port(dfs_dn_https_addr)
-    # Avoid being unable to start the datanode as a plain user due to the use of root-owned ports
-    if dfs_http_policy == "HTTPS_ONLY":
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
-    elif dfs_http_policy == "HTTP_AND_HTTPS":
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
-    else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-      secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
-    if secure_dn_ports_are_in_use:
-      hadoop_secure_dn_user = hdfs_user
-    else:
-      hadoop_secure_dn_user = '""'
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-falcon_user = config['configurations']['falcon-env']["falcon_user"]
-ranger_user = config['configurations']['ranger-env']["ranger_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-
-has_namenode = not len(namenode_host) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = 'tez-site' in config['configurations']
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_falcon_server_hosts = not len(falcon_server_hosts) == 0
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-ranger_group = config['configurations']['ranger-env']['ranger_group']
-dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
-
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = collections.defaultdict(lambda:[user_group])
-user_to_groups_dict[smoke_user] = [proxyuser_group]
-if has_ganglia_server:
-  user_to_groups_dict[gmond_user] = [gmond_user]
-  user_to_groups_dict[gmetad_user] = [gmetad_user]
-if has_tez:
-  user_to_groups_dict[tez_user] = [proxyuser_group]
-if has_oozie_server:
-  user_to_groups_dict[oozie_user] = [proxyuser_group]
-if has_falcon_server_hosts:
-  user_to_groups_dict[falcon_user] = [proxyuser_group]
-if has_ranger_admin:
-  user_to_groups_dict[ranger_user] = [ranger_group]
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
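
For reference, the get_port/is_secure_port helpers above behave like this
(self-contained copy of the parsing logic, for illustration only):

    import re

    def get_port(address):  # same regex as in params.py above
        if address is None:
            return None
        m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
        return int(m.group(2)) if m else None

    assert get_port("0.0.0.0:1019") == 1019
    assert get_port("https://dn.example.com:50475") == 50475
    assert get_port(None) is None
    # Ports below 1024 are root-owned, hence "secure" in is_secure_port().
    assert get_port("0.0.0.0:1019") < 1024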

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
deleted file mode 100755
index 1a7d21a..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-ANY/scripts/shared_initialization.py
+++ /dev/null
@@ -1,224 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import getpass
-import tempfile
-from copy import copy
-from resource_management.libraries.functions.version import compare_versions
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-  should_create_users_and_groups = not params.host_sys_prepped and not params.ignore_groupsusers_create
-
-  if should_create_users_and_groups:
-    for group in params.group_list:
-      Group(group,
-      )
-
-    for user in params.user_list:
-      User(user,
-          gid = params.user_to_gid_dict[user],
-          groups = params.user_to_groups_dict[user],
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-      )
-
-    if params.override_uid == "true":
-      set_uid(params.smoke_user, params.smoke_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for smoke user as override_uid is disabled')
-  else:
-    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
-    pass
-
-
-  if params.has_hbase_masters:
-    Directory (params.hbase_tmp_dir,
-               owner = params.hbase_user,
-               mode=0775,
-               create_parents = True,
-               cd_access="a",
-    )
-    if not params.host_sys_prepped and params.override_uid == "true":
-      set_uid(params.hbase_user, params.hbase_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for hbase user as host is sys prepped or override_uid is disabled')
-      pass
-
-  if not params.host_sys_prepped:
-    if params.has_namenode:
-      if should_create_users_and_groups:
-        create_dfs_cluster_admins()
-    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
-      if should_create_users_and_groups:
-        create_tez_am_view_acls()
-  else:
-    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
-
-def create_dfs_cluster_admins():
-  """
-  dfs.cluster.administrators supports the format: <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
-
-  User(params.hdfs_user,
-    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-  )
-
-def create_tez_am_view_acls():
-
-  """
-  tez.am.view-acls supports the format: <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  if not params.tez_am_view_acls.startswith("*"):
-    create_users_and_groups(params.tez_am_view_acls)
-
-def create_users_and_groups(user_and_groups):
-
-  import params
-
-  parts = re.split(r'\s', user_and_groups)
-  if len(parts) == 1:
-    parts.append("")
-
-  users_list = parts[0].split(",") if parts[0] else []
-  groups_list = parts[1].split(",") if parts[1] else []
-
-  if users_list:
-    User(users_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-    )
-
-  if groups_list:
-    Group(copy(groups_list),
-    )
-  return groups_list
-    
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
-          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-    
-def setup_hadoop_env():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    # create /etc/hadoop
-    Directory(params.hadoop_dir, mode=0755)
-
-    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
-    if Script.is_stack_less_than("2.2"):
-      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
-        group=params.user_group )
-
-      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}"))
-
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
-        group=params.user_group,
-        content=InlineTemplate(params.hadoop_env_sh_template))
-
-    # Create tmp dir for java.io.tmpdir
-    # Handle the case where /tmp is mounted noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=01777
-    )
-
-def setup_java():
-  """
-  Installs the JDK using parameters that come from ambari-server
-  """
-  import params
-
-  java_exec = format("{java_home}/bin/java")
-
-  if not os.path.isfile(java_exec):
-    if not params.jdk_name: # jdk_name is empty when a user-installed (custom) JDK is expected
-      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
-
-    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
-    java_dir = os.path.dirname(params.java_home)
-
-    Directory(params.artifact_dir,
-              create_parents = True,
-              )
-
-    File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
-         not_if = format("test -f {jdk_curl_target}")
-    )
-
-    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
-
-    try:
-      if params.jdk_name.endswith(".bin"):
-        chmod_cmd = ("chmod", "+x", jdk_curl_target)
-        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-      elif params.jdk_name.endswith(".gz"):
-        chmod_cmd = ("chmod","a+x", java_dir)
-        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-
-      Directory(java_dir
-      )
-
-      Execute(chmod_cmd,
-              sudo = True,
-              )
-
-      Execute(install_cmd,
-              )
-
-    finally:
-      Directory(tmp_java_dir, action="delete")
-
-    File(format("{java_home}/bin/java"),
-         mode=0755,
-         cd_access="a",
-         )
-    Execute(('chmod', '-R', '755', params.java_home),
-      sudo = True,
-    )
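For reference, both create_dfs_cluster_admins() and create_tez_am_view_acls() above funnel their property values through create_users_and_groups(), which expects the "<users><space><groups>" format described in the docstrings. A minimal standalone sketch of that parsing in plain Python 2, with no Ambari libraries (the function name is hypothetical):

  import re

  def split_users_and_groups(user_and_groups):
      # "user1,user2 group1,group2" -> (['user1', 'user2'], ['group1', 'group2'])
      parts = re.split(r'\s', user_and_groups)
      if len(parts) == 1:
          parts.append("")
      users = parts[0].split(",") if parts[0] else []
      groups = parts[1].split(",") if parts[1] else []
      return users, groups

  print split_users_and_groups("hdfs,yarn hadoop")  # (['hdfs', 'yarn'], ['hadoop'])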

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100755
index ce17776..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
-
-class BeforeInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-    
-    install_repos()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeInstallHook().execute()
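The hook above follows the standard Ambari agent pattern: run_custom_hook('before-ANY') chains the before-ANY hook first, env.set_params(params) publishes the params module, and then the resource definitions run. A minimal sketch of the same skeleton, assuming the agent's resource_management library is on sys.path (ExampleHook is a hypothetical name):

  from resource_management import Hook  # assumes ambari-agent libraries are on sys.path

  class ExampleHook(Hook):
    def hook(self, env):
      import params            # params.py from the same scripts/ directory
      env.set_params(params)
      # resource definitions (Package, Directory, Execute, ...) go here

  if __name__ == "__main__":
    ExampleHook().execute()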

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
deleted file mode 100755
index 6193c11..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management.core.system import System
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import default, format
-from resource_management.libraries.functions.expect import expect
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-
-# repo templates
-repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
-has_hs = not len(hs_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-has_falcon_server = not len(falcon_host) == 0
-has_tez = 'tez-site' in config['configurations']
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
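Every lookup in this params.py reads the command JSON that the Ambari server ships to the agent with each command. An illustrative, far-from-exhaustive sketch of that shape, with hypothetical values:

  config = {
    'hostLevelParams': {
      'stack_version': '2.4',                                 # hypothetical value
      'java_home': '/usr/jdk64/jdk1.8.0_77',                  # hypothetical value
      'jdk_location': 'http://ambari.example.com/resources',  # hypothetical value
      'repo_info': '[...]',  # a JSON *string*, parsed later by repo_initialization.py
    },
    'clusterHostInfo': {
      'ambari_server_host': ['ambari.example.com'],
      'namenode_host': ['nn1.example.com'],                   # empty list => has_namenode is False
    },
    'configurations': {
      'cluster-env': {
        'smokeuser': 'ambari-qa',
        'user_group': 'hadoop',
        'security_enabled': False,
      },
    },
  }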

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
deleted file mode 100755
index a35dce7..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/repo_initialization.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.resources.repository import Repository
-from resource_management.core.logger import Logger
-import ambari_simplejson as json # simplejson is much faster than the Python 2.6 json module and has the same function set.
-
-# components_list = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
-  @param repo_template: repository file template (repo_suse_rhel_template or repo_ubuntu_template from cluster-env)
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  if 0 == len(repo_dicts):
-    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
-  else:
-    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
-
-  for repo in repo_dicts:
-    if 'baseUrl' not in repo:
-      repo['baseUrl'] = None
-    if 'mirrorsList' not in repo:
-      repo['mirrorsList'] = None
-    
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-    
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
-
-def install_repos():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
-  _alter_repo("create", params.repo_info, template)
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100755
index 1609050..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import stack_tools
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.core.resources.packaging import Package
-
-def install_packages():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  packages = ['unzip', 'curl']
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
-    packages.append(stack_selector_package)
-  Package(packages,
-          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-          retry_count=params.agent_stack_retry_count)
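The stack selector package is only appended when the formatted stack version compares at or above 2.2. A toy stand-in for compare_versions(), assuming purely numeric dotted versions (the real helper also copes with non-numeric build suffixes):

  def compare_versions(a, b):
    # minimal sketch: compare dotted versions component-wise, like cmp()
    return cmp([int(x) for x in a.split('.')],
               [int(x) for x in b.split('.')])

  print compare_versions('2.3.0.0', '2.2') >= 0  # True: stack selector package gets appended
  print compare_versions('2.1.0', '2.2') >= 0    # False: only unzip and curl are installed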

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
deleted file mode 100755
index 14b9d99..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-RESTART/scripts/hook.py
+++ /dev/null
@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
deleted file mode 100755
index 68aa96d..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
-  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
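checkForFormat.sh is invoked as "checkForFormat.sh HDFS_USER CONF_DIR BIN_DIR MARK_DIR NAME_DIR[,NAME_DIR...]" and only runs "hdfs namenode -format" when the marker directory is absent and every name directory is empty. The core emptiness test, restated as a short Python sketch (the function name is hypothetical, and it assumes the listed directories exist):

  import os

  def non_empty_name_dirs(name_dirs):
    # mirrors the shell loop above: a dir fails the check if it has any entries
    return [d for d in name_dirs.split(',') if os.listdir(d)]

  # hypothetical usage: format only when this comes back empty
  # print non_empty_name_dirs('/hadoop/hdfs/namenode,/mnt/hdfs/namenode')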

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar
deleted file mode 100755
index c90890b..0000000
Binary files a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/fast-hdfs-resource.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
deleted file mode 100755
index 7e12962..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use the following logger to send job summaries to a separate file, defined by
-# hadoop.mapreduce.jobsummary.log.file and rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
- 
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
deleted file mode 100755
index 0f7a55c..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/files/topology_script.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys, os
-from string import join
-import ConfigParser
-
-
-DEFAULT_RACK = "/default-rack"
-DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
-SECTION_NAME = "network_topology"
-
-class TopologyScript():
-
-  def load_rack_map(self):
-    try:
-      #RACK_MAP contains both host name vs rack and ip vs rack mappings
-      mappings = ConfigParser.ConfigParser()
-      mappings.read(DATA_FILE_NAME)
-      return dict(mappings.items(SECTION_NAME))
-    except ConfigParser.NoSectionError:
-      return {}
-
-  def get_racks(self, rack_map, args):
-    if len(args) == 1:
-      return DEFAULT_RACK
-    else:
-      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]])
-
-  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
-    #try looking up by hostname
-    rack = rack_map.get(hostname_or_ip)
-    if rack is not None:
-      return rack
-    #try looking up by ip
-    rack = rack_map.get(self.extract_ip(hostname_or_ip))
-    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
-    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
-
-  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
-  def extract_ip(self, container_string):
-    return container_string.split("/")[0].split(":")[0]
-
-  def execute(self, args):
-    rack_map = self.load_rack_map()
-    rack = self.get_racks(rack_map, args)
-    print rack
-
-if __name__ == "__main__":
-  TopologyScript().execute(sys.argv)
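The script resolves hosts against topology_mappings.data, an INI file with a single [network_topology] section rendered from the topology_mappings.data.j2 template in this same hook. A sketch of what the data file and a lookup might look like (hostnames and racks are hypothetical):

  # topology_mappings.data (illustrative contents):
  #
  #   [network_topology]
  #   nn1.example.com=/rack-01
  #   192.168.1.10=/rack-01
  #
  import ConfigParser

  parser = ConfigParser.ConfigParser()
  parser.read('topology_mappings.data')
  try:
    print dict(parser.items('network_topology'))
  except ConfigParser.NoSectionError:
    print {}  # same fallback load_rack_map() above uses

  # invoking the script itself: "python topology_script.py 192.168.1.10"
  # prints /rack-01, or /default-rack for unmapped hosts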

http://git-wip-us.apache.org/repos/asf/bigtop/blob/0d3448b8/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py b/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
deleted file mode 100755
index f21e4b1..0000000
--- a/bigtop-packages/src/common/ambari/ODPi/1.0/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink
-
-class BeforeStartHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-    create_topology_script_and_mapping()
-
-if __name__ == "__main__":
-  BeforeStartHook().execute()