You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ambari.apache.org by jo...@apache.org on 2018/08/10 15:30:14 UTC

[ambari] branch trunk updated: [AMBARI-24446] - Removal of Unsupported Stacks (#2010)

This is an automated email from the ASF dual-hosted git repository.

jonathanhurley pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/ambari.git


The following commit(s) were added to refs/heads/trunk by this push:
     new d680af8  [AMBARI-24446] - Removal of Unsupported Stacks (#2010)
d680af8 is described below

commit d680af8057a40ab38ab2866799eb9150283c6a4c
Author: Jonathan Hurley <jo...@apache.org>
AuthorDate: Fri Aug 10 11:30:10 2018 -0400

    [AMBARI-24446] - Removal of Unsupported Stacks (#2010)
---
 .../src/test/python/ambari_agent/TestCheckWebUI.py |    78 -
 .../ACCUMULO/1.6.1.2.2.0/alerts.json               |   139 -
 .../1.6.1.2.2.0/configuration/accumulo-env.xml     |   256 -
 .../1.6.1.2.2.0/configuration/accumulo-log4j.xml   |   115 -
 .../1.6.1.2.2.0/configuration/accumulo-site.xml    |   190 -
 .../ACCUMULO/1.6.1.2.2.0/configuration/client.xml  |    23 -
 .../ACCUMULO/1.6.1.2.2.0/kerberos.json             |    80 -
 .../ACCUMULO/1.6.1.2.2.0/metainfo.xml              |   220 -
 .../ACCUMULO/1.6.1.2.2.0/metrics.json              |  1377 -
 .../1.6.1.2.2.0/package/files/accumulo-metrics.xml |    60 -
 .../1.6.1.2.2.0/package/scripts/__init__.py        |    19 -
 .../1.6.1.2.2.0/package/scripts/accumulo_client.py |    61 -
 .../package/scripts/accumulo_configuration.py      |   384 -
 .../1.6.1.2.2.0/package/scripts/accumulo_gc.py     |    24 -
 .../1.6.1.2.2.0/package/scripts/accumulo_master.py |    24 -
 .../package/scripts/accumulo_monitor.py            |    24 -
 .../1.6.1.2.2.0/package/scripts/accumulo_script.py |   110 -
 .../package/scripts/accumulo_service.py            |    75 -
 .../package/scripts/accumulo_tserver.py            |    24 -
 .../ACCUMULO/1.6.1.2.2.0/package/scripts/params.py |   222 -
 .../1.6.1.2.2.0/package/scripts/service_check.py   |    68 -
 .../1.6.1.2.2.0/package/scripts/status_params.py   |    53 -
 .../package/templates/accumulo_jaas.conf.j2        |    29 -
 .../1.6.1.2.2.0/package/templates/auditLog.xml.j2  |    35 -
 .../ACCUMULO/1.6.1.2.2.0/package/templates/gc.j2   |    20 -
 .../package/templates/generic_logger.xml.j2        |    83 -
 .../hadoop-metrics2-accumulo.properties.j2         |    66 -
 .../1.6.1.2.2.0/package/templates/masters.j2       |    20 -
 .../1.6.1.2.2.0/package/templates/monitor.j2       |    20 -
 .../package/templates/monitor_logger.xml.j2        |    64 -
 .../1.6.1.2.2.0/package/templates/slaves.j2        |    20 -
 .../1.6.1.2.2.0/package/templates/tracers.j2       |    20 -
 .../ACCUMULO/1.6.1.2.2.0/role_command_order.json   |    14 -
 .../ACCUMULO/1.6.1.2.2.0/themes/credentials.json   |    54 -
 .../ACCUMULO/1.6.1.2.2.0/themes/directories.json   |    88 -
 .../AMBARI_INFRA_SOLR/0.1.0/alerts.json            |    37 -
 .../configuration/infra-solr-client-log4j.xml      |    68 -
 .../0.1.0/configuration/infra-solr-env.xml         |   334 -
 .../0.1.0/configuration/infra-solr-log4j.xml       |    56 -
 .../configuration/infra-solr-security-json.xml     |   153 -
 .../0.1.0/configuration/infra-solr-xml.xml         |    35 -
 .../AMBARI_INFRA_SOLR/0.1.0/kerberos.json          |    53 -
 .../AMBARI_INFRA_SOLR/0.1.0/metainfo.xml           |   207 -
 .../0.1.0/package/scripts/collection.py            |   295 -
 .../0.1.0/package/scripts/command_commons.py       |   354 -
 .../0.1.0/package/scripts/infra_solr.py            |   171 -
 .../0.1.0/package/scripts/infra_solr_client.py     |    60 -
 .../0.1.0/package/scripts/migrate.py               |    62 -
 .../0.1.0/package/scripts/params.py                |   219 -
 .../0.1.0/package/scripts/service_check.py         |    48 -
 .../0.1.0/package/scripts/setup_infra_solr.py      |   163 -
 .../0.1.0/package/scripts/status_params.py         |    40 -
 .../package/templates/infra-solr-security.json.j2  |    84 -
 .../0.1.0/package/templates/infra-solr.conf.j2     |    17 -
 .../package/templates/infra_solr_jaas.conf.j2      |    26 -
 .../templates/input.config-ambari-infra.json.j2    |    48 -
 .../0.1.0/properties/infra-solr-env.sh.j2          |   108 -
 .../properties/solr-client-log4j.properties.j2     |    42 -
 .../0.1.0/properties/solr-log4j.properties.j2      |    40 -
 .../AMBARI_INFRA_SOLR/0.1.0/properties/solr.xml.j2 |   122 -
 .../0.1.0/quicklinks/quicklinks.json               |    34 -
 .../0.1.0/role_command_order.json                  |     7 -
 .../AMBARI_INFRA_SOLR/0.1.0/service_advisor.py     |   127 -
 .../0.1.0/themes/directories.json                  |   127 -
 .../AMBARI_INFRA_SOLR/0.1.0/themes/theme.json      |   107 -
 .../AMBARI_METRICS/0.1.0/alerts.json               |   192 -
 .../AMBARI_METRICS/0.1.0/configuration/ams-env.xml |   208 -
 .../0.1.0/configuration/ams-grafana-env.xml        |   102 -
 .../0.1.0/configuration/ams-grafana-ini.xml        |   323 -
 .../0.1.0/configuration/ams-hbase-env.xml          |   297 -
 .../0.1.0/configuration/ams-hbase-log4j.xml        |   188 -
 .../0.1.0/configuration/ams-hbase-policy.xml       |    53 -
 .../configuration/ams-hbase-security-site.xml      |   163 -
 .../0.1.0/configuration/ams-hbase-site.xml         |   633 -
 .../0.1.0/configuration/ams-log4j.xml              |    86 -
 .../0.1.0/configuration/ams-site.xml               |   826 -
 .../0.1.0/configuration/ams-ssl-client.xml         |    42 -
 .../0.1.0/configuration/ams-ssl-server.xml         |    84 -
 .../AMBARI_METRICS/0.1.0/kerberos.json             |   157 -
 .../AMBARI_METRICS/0.1.0/metainfo.xml              |   262 -
 .../AMBARI_METRICS/0.1.0/metrics.json              |  2472 --
 .../package/alerts/alert_ambari_metrics_monitor.py |   104 -
 .../grafana-dashboards/HDF/grafana-kafka-home.json |  1089 -
 .../HDF/grafana-kafka-hosts.json                   |  1991 --
 .../HDF/grafana-kafka-topics.json                  |   521 -
 .../grafana-dashboards/HDF/grafana-nifi-home.json  |   940 -
 .../grafana-dashboards/HDF/grafana-nifi-hosts.json |  1142 -
 .../HDF/grafana-storm-components.json              |   579 -
 .../grafana-dashboards/HDF/grafana-storm-home.json |   501 -
 .../HDF/grafana-storm-kafka-offset.json            |   258 -
 .../HDF/grafana-storm-topology.json                |   894 -
 .../grafana-dashboards/HDP/grafana-druid-home.json |   995 -
 .../HDP/grafana-druid-ingestion.json               |   776 -
 .../HDP/grafana-druid-query.json                   |   858 -
 .../grafana-dashboards/HDP/grafana-hbase-home.json |  2559 --
 .../grafana-dashboards/HDP/grafana-hbase-misc.json |  1812 --
 .../HDP/grafana-hbase-regionservers.json           | 10635 --------
 .../HDP/grafana-hbase-tables.json                  |  2285 --
 .../HDP/grafana-hbase-users.json                   |   565 -
 .../HDP/grafana-hdfs-datanodes.json                |  1322 -
 .../grafana-dashboards/HDP/grafana-hdfs-home.json  |   757 -
 .../HDP/grafana-hdfs-namenodes.json                |  1945 --
 .../grafana-dashboards/HDP/grafana-hdfs-topn.json  |   862 -
 .../grafana-dashboards/HDP/grafana-hdfs-users.json |   270 -
 .../HDP/grafana-hive-hivemetastore.json            |   587 -
 .../HDP/grafana-hive-hiverserver2.json             |   793 -
 .../grafana-dashboards/HDP/grafana-hive-home.json  |  1121 -
 .../HDP/grafana-infra-solr-collections.json        |  2088 --
 .../HDP/grafana-infra-solr-cores.json              |  2113 --
 .../HDP/grafana-infra-solr-hosts.json              |  1184 -
 .../grafana-dashboards/HDP/grafana-kafka-home.json |  1089 -
 .../HDP/grafana-kafka-hosts.json                   |  2007 --
 .../HDP/grafana-kafka-topics.json                  |   521 -
 .../HDP/grafana-llapdaemon-daemons.json            |  2174 --
 .../HDP/grafana-llapdaemon-heatmaps.json           |   364 -
 .../HDP/grafana-llapdaemon-overview.json           |  1815 --
 .../HDP/grafana-logsearch-home.json                |   600 -
 .../HDP/grafana-solr-cores-dashboard.json          |  1597 --
 .../HDP/grafana-solr-hosts-dashboard.json          |   285 -
 .../HDP/grafana-storm-components.json              |   579 -
 .../grafana-dashboards/HDP/grafana-storm-home.json |   501 -
 .../HDP/grafana-storm-kafka-offset.json            |   258 -
 .../HDP/grafana-storm-topology.json                |   894 -
 .../HDP/grafana-yarn-applications.json             |   519 -
 .../grafana-dashboards/HDP/grafana-yarn-home.json  |   636 -
 .../HDP/grafana-yarn-jobhistoryserver.json         |   402 -
 .../HDP/grafana-yarn-nodemanagers.json             |  1827 --
 .../HDP/grafana-yarn-queues.json                   |  1522 --
 .../HDP/grafana-yarn-resourcemanagers.json         |   958 -
 .../HDP/grafana-yarn-timelineserver.json           |   700 -
 .../default/grafana-ambari-server-database.json    |   917 -
 .../default/grafana-ambari-server-topn.json        |   453 -
 .../default/grafana-ambari-server.json             |   744 -
 .../default/grafana-ams-hbase-home.json            |  2558 --
 .../default/grafana-ams-hbase-misc.json            |  1696 --
 .../default/grafana-ams-hbase-regionservers.json   |  9078 -------
 .../default/grafana-system-home.json               |  1768 --
 .../default/grafana-system-servers.json            |  2613 --
 .../0.1.0/package/files/hbaseSmokeVerify.sh        |    34 -
 .../files/service-metrics/AMBARI_METRICS.txt       |   245 -
 .../0.1.0/package/files/service-metrics/FLUME.txt  |    17 -
 .../0.1.0/package/files/service-metrics/HBASE.txt  |   588 -
 .../0.1.0/package/files/service-metrics/HDFS.txt   |   342 -
 .../0.1.0/package/files/service-metrics/HIVE.txt   |   181 -
 .../0.1.0/package/files/service-metrics/HOST.txt   |    55 -
 .../0.1.0/package/files/service-metrics/KAFKA.txt  |  1104 -
 .../0.1.0/package/files/service-metrics/STORM.txt  |     7 -
 .../0.1.0/package/files/service-metrics/YARN.txt   |   178 -
 .../0.1.0/package/scripts/__init__.py              |    19 -
 .../AMBARI_METRICS/0.1.0/package/scripts/ams.py    |   589 -
 .../0.1.0/package/scripts/ams_service.py           |   125 -
 .../0.1.0/package/scripts/functions.py             |    47 -
 .../AMBARI_METRICS/0.1.0/package/scripts/hbase.py  |   282 -
 .../0.1.0/package/scripts/hbase_master.py          |    70 -
 .../0.1.0/package/scripts/hbase_regionserver.py    |    66 -
 .../0.1.0/package/scripts/hbase_service.py         |    55 -
 .../0.1.0/package/scripts/metrics_collector.py     |    90 -
 .../0.1.0/package/scripts/metrics_grafana.py       |    83 -
 .../0.1.0/package/scripts/metrics_grafana_util.py  |   475 -
 .../0.1.0/package/scripts/metrics_monitor.py       |    79 -
 .../AMBARI_METRICS/0.1.0/package/scripts/params.py |   421 -
 .../0.1.0/package/scripts/params_linux.py          |    70 -
 .../0.1.0/package/scripts/params_windows.py        |    62 -
 .../0.1.0/package/scripts/service_check.py         |   305 -
 .../0.1.0/package/scripts/service_mapping.py       |    22 -
 .../0.1.0/package/scripts/split_points.py          |   246 -
 .../AMBARI_METRICS/0.1.0/package/scripts/status.py |    54 -
 .../0.1.0/package/scripts/status_params.py         |    45 -
 .../0.1.0/package/templates/ams.conf.j2            |    35 -
 .../package/templates/ams_collector_jaas.conf.j2   |    26 -
 .../package/templates/ams_zookeeper_jaas.conf.j2   |    26 -
 .../templates/hadoop-metrics2-hbase.properties.j2  |    62 -
 .../package/templates/hbase_client_jaas.conf.j2    |    23 -
 .../package/templates/hbase_grant_permissions.j2   |    39 -
 .../package/templates/hbase_master_jaas.conf.j2    |    36 -
 .../templates/hbase_regionserver_jaas.conf.j2      |    36 -
 .../0.1.0/package/templates/metric_groups.conf.j2  |    37 -
 .../0.1.0/package/templates/metric_monitor.ini.j2  |    52 -
 .../templates/metrics_grafana_datasource.json.j2   |    33 -
 .../0.1.0/package/templates/regionservers.j2       |    20 -
 .../package/templates/smoketest_metrics.json.j2    |    14 -
 .../0.1.0/quicklinks/quicklinks.json               |    34 -
 .../AMBARI_METRICS/0.1.0/role_command_order.json   |     9 -
 .../AMBARI_METRICS/0.1.0/service_advisor.py        |   766 -
 .../AMBARI_METRICS/0.1.0/themes/credentials.json   |    42 -
 .../AMBARI_METRICS/0.1.0/themes/directories.json   |   247 -
 .../AMBARI_METRICS/0.1.0/themes/theme.json         |   288 -
 .../common-services/ATLAS/0.1.0.2.3/alerts.json    |    39 -
 .../configuration/application-properties.xml       |   166 -
 .../ATLAS/0.1.0.2.3/configuration/atlas-env.xml    |   156 -
 .../ATLAS/0.1.0.2.3/configuration/atlas-log4j.xml  |   153 -
 .../common-services/ATLAS/0.1.0.2.3/kerberos.json  |    65 -
 .../common-services/ATLAS/0.1.0.2.3/metainfo.xml   |   117 -
 .../0.1.0.2.3/package/scripts/atlas_client.py      |    53 -
 .../ATLAS/0.1.0.2.3/package/scripts/metadata.py    |   265 -
 .../0.1.0.2.3/package/scripts/metadata_server.py   |   182 -
 .../ATLAS/0.1.0.2.3/package/scripts/params.py      |   423 -
 .../0.1.0.2.3/package/scripts/service_check.py     |    55 -
 .../package/scripts/setup_ranger_atlas.py          |    70 -
 .../0.1.0.2.3/package/scripts/status_params.py     |    60 -
 .../package/templates/atlas_hbase_setup.rb.j2      |    42 -
 .../0.1.0.2.3/package/templates/atlas_jaas.conf.j2 |    26 -
 .../package/templates/atlas_kafka_acl.sh.j2        |    41 -
 .../package/templates/input.config-atlas.json.j2   |    48 -
 .../0.1.0.2.3/package/templates/kafka_jaas.conf.j2 |    41 -
 .../ATLAS/0.1.0.2.3/role_command_order.json        |     7 -
 .../configuration/application-properties.xml       |   481 -
 .../ATLAS/0.7.0.2.5/configuration/atlas-log4j.xml  |   166 -
 .../0.7.0.2.5/configuration/atlas-solrconfig.xml   |   641 -
 .../common-services/ATLAS/0.7.0.2.5/kerberos.json  |   105 -
 .../common-services/ATLAS/0.7.0.2.5/metainfo.xml   |    79 -
 .../ATLAS/0.7.0.2.5/quicklinks/quicklinks.json     |    36 -
 .../ATLAS/0.7.0.2.5/role_command_order.json        |     7 -
 .../ATLAS/0.7.0.2.5/themes/directories.json        |   127 -
 .../ATLAS/0.7.0.2.5/themes/theme.json              |   619 -
 .../DRUID/0.10.1/configuration/druid-broker.xml    |   106 -
 .../DRUID/0.10.1/configuration/druid-common.xml    |   270 -
 .../0.10.1/configuration/druid-coordinator.xml     |    43 -
 .../DRUID/0.10.1/configuration/druid-env.xml       |   248 -
 .../0.10.1/configuration/druid-historical.xml      |    94 -
 .../DRUID/0.10.1/configuration/druid-log4j.xml     |    84 -
 .../DRUID/0.10.1/configuration/druid-logrotate.xml |    68 -
 .../0.10.1/configuration/druid-middlemanager.xml   |   122 -
 .../DRUID/0.10.1/configuration/druid-overlord.xml  |    52 -
 .../DRUID/0.10.1/configuration/druid-router.xml    |    59 -
 .../common-services/DRUID/0.10.1/metainfo.xml      |   223 -
 .../DRUID/0.10.1/package/scripts/broker.py         |    28 -
 .../DRUID/0.10.1/package/scripts/coordinator.py    |    28 -
 .../DRUID/0.10.1/package/scripts/druid.py          |   306 -
 .../DRUID/0.10.1/package/scripts/druid_node.py     |   115 -
 .../DRUID/0.10.1/package/scripts/historical.py     |    28 -
 .../DRUID/0.10.1/package/scripts/middlemanager.py  |    28 -
 .../DRUID/0.10.1/package/scripts/overlord.py       |    28 -
 .../DRUID/0.10.1/package/scripts/params.py         |   200 -
 .../DRUID/0.10.1/package/scripts/router.py         |    28 -
 .../DRUID/0.10.1/package/scripts/service_check.py  |    44 -
 .../DRUID/0.10.1/package/scripts/status_params.py  |    24 -
 .../DRUID/0.10.1/quicklinks/quicklinks.json        |    37 -
 .../DRUID/0.10.1/role_command_order.json           |    17 -
 .../common-services/DRUID/0.10.1/themes/theme.json |   120 -
 .../common-services/FALCON/0.5.0.2.1/alerts.json   |    61 -
 .../configuration/falcon-client.properties.xml     |    29 -
 .../FALCON/0.5.0.2.1/configuration/falcon-env.xml  |   205 -
 .../0.5.0.2.1/configuration/falcon-log4j.xml       |   208 -
 .../configuration/falcon-runtime.properties.xml    |    53 -
 .../configuration/falcon-startup.properties.xml    |   251 -
 .../common-services/FALCON/0.5.0.2.1/kerberos.json |    72 -
 .../common-services/FALCON/0.5.0.2.1/metainfo.xml  |   151 -
 .../FALCON/0.5.0.2.1/package/scripts/falcon.py     |   298 -
 .../0.5.0.2.1/package/scripts/falcon_client.py     |    67 -
 .../0.5.0.2.1/package/scripts/falcon_server.py     |   116 -
 .../package/scripts/falcon_server_upgrade.py       |    87 -
 .../FALCON/0.5.0.2.1/package/scripts/params.py     |    31 -
 .../0.5.0.2.1/package/scripts/params_linux.py      |   208 -
 .../0.5.0.2.1/package/scripts/params_windows.py    |    46 -
 .../0.5.0.2.1/package/scripts/service_check.py     |    55 -
 .../0.5.0.2.1/package/scripts/status_params.py     |    67 -
 .../package/templates/client.properties.j2         |    42 -
 .../package/templates/input.config-falcon.json.j2  |    48 -
 .../package/templates/runtime.properties.j2        |    50 -
 .../FALCON/0.5.0.2.1/quicklinks/quicklinks.json    |    36 -
 .../FALCON/0.5.0.2.1/role_command_order.json       |     7 -
 .../FALCON/0.5.0.2.1/themes/directories.json       |   137 -
 .../common-services/FLUME/1.4.0.2.0/alerts.json    |    27 -
 .../FLUME/1.4.0.2.0/configuration/flume-conf.xml   |    39 -
 .../FLUME/1.4.0.2.0/configuration/flume-env.xml    |   112 -
 .../FLUME/1.4.0.2.0/configuration/flume-log4j.xml  |    96 -
 .../common-services/FLUME/1.4.0.2.0/kerberos.json  |    44 -
 .../common-services/FLUME/1.4.0.2.0/metainfo.xml   |    75 -
 .../common-services/FLUME/1.4.0.2.0/metrics.json   |   367 -
 .../package/alerts/alert_flume_agent_status.py     |   106 -
 .../FLUME/1.4.0.2.0/package/scripts/flume.py       |   311 -
 .../FLUME/1.4.0.2.0/package/scripts/flume_check.py |    51 -
 .../1.4.0.2.0/package/scripts/flume_handler.py     |   126 -
 .../FLUME/1.4.0.2.0/package/scripts/params.py      |   151 -
 .../1.4.0.2.0/package/scripts/params_linux.py      |    45 -
 .../1.4.0.2.0/package/scripts/params_windows.py    |    33 -
 .../1.4.0.2.0/package/scripts/service_mapping.py   |    21 -
 .../package/templates/flume-metrics2.properties.j2 |    38 -
 .../1.4.0.2.0/package/templates/flume.conf.j2      |    24 -
 .../package/templates/input.config-flume.json.j2   |    53 -
 .../package/templates/log4j.properties.j2          |    67 -
 .../common-services/GANGLIA/3.5.0/alerts.json      |   137 -
 .../GANGLIA/3.5.0/configuration/ganglia-env.xml    |    85 -
 .../common-services/GANGLIA/3.5.0/metainfo.xml     |   129 -
 .../GANGLIA/3.5.0/package/files/checkGmetad.sh     |    37 -
 .../GANGLIA/3.5.0/package/files/checkGmond.sh      |    62 -
 .../GANGLIA/3.5.0/package/files/checkRrdcached.sh  |    34 -
 .../GANGLIA/3.5.0/package/files/gmetad.init        |    73 -
 .../GANGLIA/3.5.0/package/files/gmetadLib.sh       |   204 -
 .../GANGLIA/3.5.0/package/files/gmond.init         |    73 -
 .../GANGLIA/3.5.0/package/files/gmondLib.sh        |   538 -
 .../GANGLIA/3.5.0/package/files/rrdcachedLib.sh    |    47 -
 .../GANGLIA/3.5.0/package/files/setupGanglia.sh    |   145 -
 .../GANGLIA/3.5.0/package/files/startGmetad.sh     |    68 -
 .../GANGLIA/3.5.0/package/files/startGmond.sh      |    85 -
 .../GANGLIA/3.5.0/package/files/startRrdcached.sh  |    79 -
 .../GANGLIA/3.5.0/package/files/stopGmetad.sh      |    43 -
 .../GANGLIA/3.5.0/package/files/stopGmond.sh       |    54 -
 .../GANGLIA/3.5.0/package/files/stopRrdcached.sh   |    41 -
 .../GANGLIA/3.5.0/package/files/teardownGanglia.sh |    28 -
 .../GANGLIA/3.5.0/package/scripts/functions.py     |    40 -
 .../GANGLIA/3.5.0/package/scripts/ganglia.py       |   101 -
 .../3.5.0/package/scripts/ganglia_monitor.py       |   245 -
 .../package/scripts/ganglia_monitor_service.py     |    28 -
 .../3.5.0/package/scripts/ganglia_server.py        |   130 -
 .../package/scripts/ganglia_server_service.py      |    28 -
 .../GANGLIA/3.5.0/package/scripts/params.py        |   166 -
 .../GANGLIA/3.5.0/package/scripts/status_params.py |    25 -
 .../3.5.0/package/templates/ganglia.conf.j2        |    34 -
 .../package/templates/gangliaClusters.conf.j2      |    43 -
 .../3.5.0/package/templates/gangliaEnv.sh.j2       |    46 -
 .../3.5.0/package/templates/gangliaLib.sh.j2       |    86 -
 .../GANGLIA/3.5.0/package/templates/rrd.py.j2      |   361 -
 .../common-services/HAWQ/2.0.0/alerts.json         |   125 -
 .../HAWQ/2.0.0/configuration/hawq-check-env.xml    |    94 -
 .../HAWQ/2.0.0/configuration/hawq-env.xml          |    61 -
 .../HAWQ/2.0.0/configuration/hawq-limits-env.xml   |    75 -
 .../HAWQ/2.0.0/configuration/hawq-site.xml         |   583 -
 .../HAWQ/2.0.0/configuration/hawq-sysctl-env.xml   |   360 -
 .../HAWQ/2.0.0/configuration/hdfs-client.xml       |   471 -
 .../HAWQ/2.0.0/configuration/yarn-client.xml       |   155 -
 .../common-services/HAWQ/2.0.0/kerberos.json       |    74 -
 .../common-services/HAWQ/2.0.0/metainfo.xml        |   201 -
 .../common-services/HAWQ/2.0.0/metrics.json        |   109 -
 .../2.0.0/package/alerts/alert_component_status.py |    76 -
 .../alerts/alert_segment_registration_status.py    |   119 -
 .../HAWQ/2.0.0/package/alerts/alert_sync_status.py |    96 -
 .../HAWQ/2.0.0/package/scripts/__init__.py         |    19 -
 .../HAWQ/2.0.0/package/scripts/common.py           |   348 -
 .../HAWQ/2.0.0/package/scripts/custom_params.py    |    21 -
 .../HAWQ/2.0.0/package/scripts/hawq_constants.py   |    89 -
 .../HAWQ/2.0.0/package/scripts/hawqmaster.py       |   126 -
 .../HAWQ/2.0.0/package/scripts/hawqsegment.py      |    63 -
 .../HAWQ/2.0.0/package/scripts/hawqstandby.py      |    72 -
 .../HAWQ/2.0.0/package/scripts/hawqstatus.py       |    33 -
 .../HAWQ/2.0.0/package/scripts/master_helper.py    |    76 -
 .../HAWQ/2.0.0/package/scripts/params.py           |   158 -
 .../HAWQ/2.0.0/package/scripts/service_check.py    |   171 -
 .../HAWQ/2.0.0/package/scripts/utils.py            |   107 -
 .../common-services/HAWQ/2.0.0/service_advisor.py  |   437 -
 .../common-services/HAWQ/2.0.0/themes/theme.json   |   285 -
 .../common-services/HAWQ/2.0.0/widgets.json        |   187 -
 .../common-services/HBASE/0.96.0.2.0/alerts.json   |   127 -
 .../HBASE/0.96.0.2.0/configuration/hbase-env.xml   |   254 -
 .../HBASE/0.96.0.2.0/configuration/hbase-log4j.xml |   188 -
 .../0.96.0.2.0/configuration/hbase-policy.xml      |    53 -
 .../HBASE/0.96.0.2.0/configuration/hbase-site.xml  |   588 -
 .../common-services/HBASE/0.96.0.2.0/kerberos.json |   124 -
 .../common-services/HBASE/0.96.0.2.0/metainfo.xml  |   179 -
 .../common-services/HBASE/0.96.0.2.0/metrics.json  |  9424 -------
 .../0.96.0.2.0/package/files/draining_servers.rb   |   164 -
 .../package/files/hbase-smoke-cleanup.sh           |    23 -
 .../0.96.0.2.0/package/files/hbaseSmokeVerify.sh   |    34 -
 .../HBASE/0.96.0.2.0/package/scripts/__init__.py   |    19 -
 .../HBASE/0.96.0.2.0/package/scripts/functions.py  |    54 -
 .../HBASE/0.96.0.2.0/package/scripts/hbase.py      |   252 -
 .../0.96.0.2.0/package/scripts/hbase_client.py     |    67 -
 .../package/scripts/hbase_decommission.py          |    94 -
 .../0.96.0.2.0/package/scripts/hbase_master.py     |   111 -
 .../package/scripts/hbase_regionserver.py          |   122 -
 .../0.96.0.2.0/package/scripts/hbase_service.py    |    67 -
 .../0.96.0.2.0/package/scripts/hbase_upgrade.py    |    42 -
 .../HBASE/0.96.0.2.0/package/scripts/params.py     |    28 -
 .../0.96.0.2.0/package/scripts/params_linux.py     |   457 -
 .../0.96.0.2.0/package/scripts/params_windows.py   |    43 -
 .../package/scripts/phoenix_queryserver.py         |    86 -
 .../0.96.0.2.0/package/scripts/phoenix_service.py  |    56 -
 .../0.96.0.2.0/package/scripts/service_check.py    |    99 -
 .../package/scripts/setup_ranger_hbase.py          |   106 -
 .../0.96.0.2.0/package/scripts/status_params.py    |    68 -
 .../HBASE/0.96.0.2.0/package/scripts/upgrade.py    |   105 -
 ...oop-metrics2-hbase.properties-GANGLIA-MASTER.j2 |   135 -
 .../hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 |   133 -
 .../0.96.0.2.0/package/templates/hbase-smoke.sh.j2 |    44 -
 .../0.96.0.2.0/package/templates/hbase.conf.j2     |    35 -
 .../package/templates/hbase_client_jaas.conf.j2    |    23 -
 .../package/templates/hbase_grant_permissions.j2   |    39 -
 .../package/templates/hbase_master_jaas.conf.j2    |    36 -
 .../templates/hbase_queryserver_jaas.conf.j2       |    26 -
 .../templates/hbase_regionserver_jaas.conf.j2      |    36 -
 .../package/templates/input.config-hbase.json.j2   |    79 -
 .../0.96.0.2.0/package/templates/regionservers.j2  |    20 -
 .../HBASE/0.96.0.2.0/role_command_order.json       |     9 -
 .../HBASE/0.96.0.2.0/themes/directories.json       |   167 -
 .../common-services/HBASE/0.96.0.2.0/widgets.json  |   504 -
 .../common-services/HDFS/2.1.0.2.0/alerts.json     |  1786 --
 .../HDFS/2.1.0.2.0/configuration/core-site.xml     |   210 -
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml    |   398 -
 .../HDFS/2.1.0.2.0/configuration/hadoop-policy.xml |   130 -
 .../HDFS/2.1.0.2.0/configuration/hdfs-log4j.xml    |   249 -
 .../HDFS/2.1.0.2.0/configuration/hdfs-site.xml     |   517 -
 .../HDFS/2.1.0.2.0/configuration/ssl-client.xml    |    70 -
 .../HDFS/2.1.0.2.0/configuration/ssl-server.xml    |    80 -
 .../common-services/HDFS/2.1.0.2.0/kerberos.json   |   233 -
 .../common-services/HDFS/2.1.0.2.0/metainfo.xml    |   372 -
 .../common-services/HDFS/2.1.0.2.0/metrics.json    |  7910 ------
 .../package/alerts/alert_checkpoint_time.py        |   255 -
 .../alerts/alert_datanode_unmounted_data_dir.py    |   178 -
 .../package/alerts/alert_ha_namenode_health.py     |   243 -
 .../package/alerts/alert_metrics_deviation.py      |   498 -
 .../package/alerts/alert_upgrade_finalized.py      |   179 -
 .../HDFS/2.1.0.2.0/package/files/checkWebUI.py     |    86 -
 .../HDFS/2.1.0.2.0/package/scripts/__init__.py     |    20 -
 .../scripts/balancer-emulator/balancer-err.log     |  1032 -
 .../package/scripts/balancer-emulator/balancer.log |    29 -
 .../scripts/balancer-emulator/hdfs-command.py      |    45 -
 .../HDFS/2.1.0.2.0/package/scripts/datanode.py     |   172 -
 .../2.1.0.2.0/package/scripts/datanode_upgrade.py  |   120 -
 .../HDFS/2.1.0.2.0/package/scripts/hdfs.py         |   251 -
 .../HDFS/2.1.0.2.0/package/scripts/hdfs_client.py  |    78 -
 .../2.1.0.2.0/package/scripts/hdfs_datanode.py     |    85 -
 .../2.1.0.2.0/package/scripts/hdfs_namenode.py     |   638 -
 .../2.1.0.2.0/package/scripts/hdfs_nfsgateway.py   |    76 -
 .../2.1.0.2.0/package/scripts/hdfs_rebalance.py    |   160 -
 .../2.1.0.2.0/package/scripts/hdfs_snamenode.py    |    72 -
 .../2.1.0.2.0/package/scripts/install_params.py    |    29 -
 .../HDFS/2.1.0.2.0/package/scripts/journalnode.py  |   143 -
 .../package/scripts/journalnode_upgrade.py         |   152 -
 .../HDFS/2.1.0.2.0/package/scripts/namenode.py     |   408 -
 .../2.1.0.2.0/package/scripts/namenode_ha_state.py |   223 -
 .../2.1.0.2.0/package/scripts/namenode_upgrade.py  |   322 -
 .../HDFS/2.1.0.2.0/package/scripts/nfsgateway.py   |    87 -
 .../HDFS/2.1.0.2.0/package/scripts/params.py       |    29 -
 .../HDFS/2.1.0.2.0/package/scripts/params_linux.py |   578 -
 .../2.1.0.2.0/package/scripts/params_windows.py    |    86 -
 .../2.1.0.2.0/package/scripts/service_check.py     |   159 -
 .../2.1.0.2.0/package/scripts/setup_ranger_hdfs.py |   119 -
 .../HDFS/2.1.0.2.0/package/scripts/snamenode.py    |   101 -
 .../2.1.0.2.0/package/scripts/status_params.py     |    58 -
 .../HDFS/2.1.0.2.0/package/scripts/utils.py        |   431 -
 .../HDFS/2.1.0.2.0/package/scripts/zkfc_slave.py   |   194 -
 .../package/templates/exclude_hosts_list.j2        |    21 -
 .../HDFS/2.1.0.2.0/package/templates/hdfs.conf.j2  |    35 -
 .../package/templates/hdfs_dn_jaas.conf.j2         |    27 -
 .../2.1.0.2.0/package/templates/hdfs_jaas.conf.j2  |    27 -
 .../package/templates/hdfs_jn_jaas.conf.j2         |    27 -
 .../package/templates/hdfs_nn_jaas.conf.j2         |    27 -
 .../package/templates/include_hosts_list.j2        |    21 -
 .../HDFS/2.1.0.2.0/package/templates/slaves.j2     |    21 -
 .../HDFS/2.1.0.2.0/role_command_order.json         |    22 -
 .../HDFS/2.1.0.2.0/themes/directories.json         |   177 -
 .../common-services/HDFS/2.1.0.2.0/widgets.json    |   456 -
 .../common-services/HIVE/0.12.0.2.0/alerts.json    |   232 -
 .../HIVE/0.12.0.2.0/configuration/hcat-env.xml     |    60 -
 .../HIVE/0.12.0.2.0/configuration/hive-env.xml     |   282 -
 .../0.12.0.2.0/configuration/hive-exec-log4j.xml   |   114 -
 .../HIVE/0.12.0.2.0/configuration/hive-log4j.xml   |   147 -
 .../HIVE/0.12.0.2.0/configuration/hive-site.xml    |   560 -
 .../HIVE/0.12.0.2.0/configuration/webhcat-env.xml  |    57 -
 .../0.12.0.2.0/configuration/webhcat-log4j.xml     |   104 -
 .../HIVE/0.12.0.2.0/configuration/webhcat-site.xml |   161 -
 .../0.12.0.2.0/etc/hive-schema-0.12.0.mysql.sql    |   777 -
 .../0.12.0.2.0/etc/hive-schema-0.12.0.oracle.sql   |   718 -
 .../0.12.0.2.0/etc/hive-schema-0.12.0.postgres.sql |  1406 -
 .../common-services/HIVE/0.12.0.2.0/kerberos.json  |   121 -
 .../common-services/HIVE/0.12.0.2.0/metainfo.xml   |   367 -
 .../alerts/alert_hive_interactive_thrift_port.py   |   230 -
 .../package/alerts/alert_hive_metastore.py         |   276 -
 .../package/alerts/alert_hive_thrift_port.py       |   286 -
 .../package/alerts/alert_llap_app_status.py        |   303 -
 .../package/alerts/alert_webhcat_server.py         |   228 -
 .../package/etc/hive-schema-0.12.0.mysql.sql       |   777 -
 .../package/etc/hive-schema-0.12.0.oracle.sql      |   718 -
 .../package/etc/hive-schema-0.12.0.postgres.sql    |  1406 -
 .../HIVE/0.12.0.2.0/package/files/addMysqlUser.sh  |    39 -
 .../HIVE/0.12.0.2.0/package/files/hcatSmoke.sh     |    41 -
 .../HIVE/0.12.0.2.0/package/files/hiveSmoke.sh     |    24 -
 .../HIVE/0.12.0.2.0/package/files/hiveTezSetup.cmd |    58 -
 .../HIVE/0.12.0.2.0/package/files/hiveserver2.sql  |    23 -
 .../0.12.0.2.0/package/files/hiveserver2Smoke.sh   |    32 -
 .../HIVE/0.12.0.2.0/package/files/pigSmoke.sh      |    18 -
 .../0.12.0.2.0/package/files/removeMysqlUser.sh    |    33 -
 .../0.12.0.2.0/package/files/startMetastore.sh     |    25 -
 .../0.12.0.2.0/package/files/templetonSmoke.sh     |   101 -
 .../HIVE/0.12.0.2.0/package/scripts/__init__.py    |    19 -
 .../HIVE/0.12.0.2.0/package/scripts/hcat.py        |    87 -
 .../HIVE/0.12.0.2.0/package/scripts/hcat_client.py |    79 -
 .../package/scripts/hcat_service_check.py          |    89 -
 .../HIVE/0.12.0.2.0/package/scripts/hive.py        |   572 -
 .../HIVE/0.12.0.2.0/package/scripts/hive_client.py |    63 -
 .../0.12.0.2.0/package/scripts/hive_interactive.py |   359 -
 .../0.12.0.2.0/package/scripts/hive_metastore.py   |   211 -
 .../HIVE/0.12.0.2.0/package/scripts/hive_server.py |   164 -
 .../package/scripts/hive_server_interactive.py     |   626 -
 .../package/scripts/hive_server_upgrade.py         |   134 -
 .../0.12.0.2.0/package/scripts/hive_service.py     |   186 -
 .../package/scripts/hive_service_interactive.py    |   109 -
 .../0.12.0.2.0/package/scripts/mysql_server.py     |    63 -
 .../0.12.0.2.0/package/scripts/mysql_service.py    |    65 -
 .../HIVE/0.12.0.2.0/package/scripts/mysql_users.py |    77 -
 .../HIVE/0.12.0.2.0/package/scripts/mysql_utils.py |    35 -
 .../HIVE/0.12.0.2.0/package/scripts/params.py      |    30 -
 .../0.12.0.2.0/package/scripts/params_linux.py     |   885 -
 .../0.12.0.2.0/package/scripts/params_windows.py   |    75 -
 .../HIVE/0.12.0.2.0/package/scripts/pre_upgrade.py |   111 -
 .../0.12.0.2.0/package/scripts/service_check.py    |   193 -
 .../package/scripts/setup_ranger_hive.py           |   156 -
 .../scripts/setup_ranger_hive_interactive.py       |    77 -
 .../0.12.0.2.0/package/scripts/status_params.py    |   118 -
 .../HIVE/0.12.0.2.0/package/scripts/webhcat.py     |   139 -
 .../0.12.0.2.0/package/scripts/webhcat_server.py   |    94 -
 .../0.12.0.2.0/package/scripts/webhcat_service.py  |   100 -
 .../package/scripts/webhcat_service_check.py       |   133 -
 .../hadoop-metrics2-hivemetastore.properties.j2    |    64 -
 .../hadoop-metrics2-hiveserver2.properties.j2      |    64 -
 .../templates/hadoop-metrics2-llapdaemon.j2        |    63 -
 .../templates/hadoop-metrics2-llaptaskscheduler.j2 |    63 -
 .../HIVE/0.12.0.2.0/package/templates/hive.conf.j2 |    35 -
 .../package/templates/input.config-hive.json.j2    |    85 -
 .../package/templates/startHiveserver2.sh.j2       |    24 -
 .../templates/startHiveserver2Interactive.sh.j2    |    24 -
 .../package/templates/templeton_smoke.pig.j2       |    24 -
 .../package/templates/zkmigrator_jaas.conf.j2      |    26 -
 .../HIVE/0.12.0.2.0/role_command_order.json        |    14 -
 .../HIVE/0.12.0.2.0/themes/credentials.json        |    43 -
 .../HIVE/0.12.0.2.0/themes/database.json           |   157 -
 .../HIVE/0.12.0.2.0/themes/directories.json        |   197 -
 .../KAFKA/0.10.0/configuration/kafka-broker.xml    |    39 -
 .../configuration/kafka_client_jaas_conf.xml       |    43 -
 .../KAFKA/0.10.0/configuration/kafka_jaas_conf.xml |   116 -
 .../0.10.0/configuration/ranger-kafka-audit.xml    |    58 -
 .../common-services/KAFKA/0.10.0/kerberos.json     |    82 -
 .../common-services/KAFKA/0.10.0/metainfo.xml      |    28 -
 .../common-services/KAFKA/0.8.1/alerts.json        |    32 -
 .../KAFKA/0.8.1/configuration/kafka-broker.xml     |   432 -
 .../KAFKA/0.8.1/configuration/kafka-env.xml        |   122 -
 .../KAFKA/0.8.1/configuration/kafka-log4j.xml      |   170 -
 .../0.8.1/configuration/kafka_client_jaas_conf.xml |    41 -
 .../KAFKA/0.8.1/configuration/kafka_jaas_conf.xml  |    70 -
 .../common-services/KAFKA/0.8.1/metainfo.xml       |   118 -
 .../common-services/KAFKA/0.8.1/metrics.json       |   264 -
 .../KAFKA/0.8.1/package/scripts/kafka.py           |   264 -
 .../KAFKA/0.8.1/package/scripts/kafka_broker.py    |   148 -
 .../KAFKA/0.8.1/package/scripts/params.py          |   357 -
 .../KAFKA/0.8.1/package/scripts/service_check.py   |    81 -
 .../0.8.1/package/scripts/setup_ranger_kafka.py    |    99 -
 .../KAFKA/0.8.1/package/scripts/status_params.py   |    26 -
 .../KAFKA/0.8.1/package/scripts/upgrade.py         |    72 -
 .../KAFKA/0.8.1/package/scripts/utils.py           |    38 -
 .../package/templates/input.config-kafka.json.j2   |    92 -
 .../KAFKA/0.8.1/package/templates/kafka.conf.j2    |    35 -
 .../package/templates/kafka_client_jaas.conf.j2    |    32 -
 .../0.8.1/package/templates/kafka_jaas.conf.j2     |    99 -
 .../package/templates/tools-log4j.properties.j2    |    21 -
 .../KAFKA/0.8.1/role_command_order.json            |     7 -
 .../KAFKA/0.8.1/themes/directories.json            |    98 -
 .../common-services/KAFKA/0.9.0/alerts.json        |    32 -
 .../KAFKA/0.9.0/configuration/kafka-broker.xml     |   168 -
 .../KAFKA/0.9.0/configuration/kafka-env.xml        |    28 -
 .../0.9.0/configuration/ranger-kafka-audit.xml     |   177 -
 .../ranger-kafka-plugin-properties.xml             |    94 -
 .../configuration/ranger-kafka-policymgr-ssl.xml   |    66 -
 .../0.9.0/configuration/ranger-kafka-security.xml  |    64 -
 .../common-services/KAFKA/0.9.0/kerberos.json      |    62 -
 .../common-services/KAFKA/0.9.0/metainfo.xml       |    30 -
 .../common-services/KAFKA/0.9.0/metrics.json       |   239 -
 .../KAFKA/0.9.0/role_command_order.json            |     6 -
 .../common-services/KAFKA/0.9.0/widgets.json       |   182 -
 .../1.10.3-10/configuration/kerberos-env.xml       |   391 -
 .../KERBEROS/1.10.3-10/configuration/krb5-conf.xml |    85 -
 .../KERBEROS/1.10.3-10/kerberos.json               |    18 -
 .../KERBEROS/1.10.3-10/metainfo.xml                |   131 -
 .../1.10.3-10/package/scripts/kerberos_client.py   |    91 -
 .../KERBEROS/1.10.3-10/package/scripts/params.py   |   207 -
 .../1.10.3-10/package/scripts/service_check.py     |    86 -
 .../1.10.3-10/package/scripts/status_params.py     |    34 -
 .../KERBEROS/1.10.3-10/properties/krb5_conf.j2     |    63 -
 .../1.10.3-30/configuration/kerberos-env.xml       |   391 -
 .../KERBEROS/1.10.3-30/configuration/krb5-conf.xml |    85 -
 .../KERBEROS/1.10.3-30/kerberos.json               |    18 -
 .../KERBEROS/1.10.3-30/metainfo.xml                |   131 -
 .../1.10.3-30/package/scripts/kerberos_client.py   |    91 -
 .../KERBEROS/1.10.3-30/package/scripts/params.py   |   206 -
 .../1.10.3-30/package/scripts/service_check.py     |    86 -
 .../1.10.3-30/package/scripts/status_params.py     |    34 -
 .../KERBEROS/1.10.3-30/properties/krb5_conf.j2     |    63 -
 .../common-services/KNOX/0.5.0.2.2/alerts.json     |    32 -
 .../0.5.0.2.2/configuration/admin-topology.xml     |    97 -
 .../KNOX/0.5.0.2.2/configuration/gateway-log4j.xml |   110 -
 .../KNOX/0.5.0.2.2/configuration/gateway-site.xml  |    72 -
 .../KNOX/0.5.0.2.2/configuration/knox-env.xml      |    93 -
 .../KNOX/0.5.0.2.2/configuration/ldap-log4j.xml    |    93 -
 .../ranger-knox-plugin-properties.xml              |   268 -
 .../KNOX/0.5.0.2.2/configuration/topology.xml      |   133 -
 .../KNOX/0.5.0.2.2/configuration/users-ldif.xml    |   140 -
 .../common-services/KNOX/0.5.0.2.2/kerberos.json   |    63 -
 .../common-services/KNOX/0.5.0.2.2/metainfo.xml    |   100 -
 .../0.5.0.2.2/package/files/validateKnoxStatus.py  |    43 -
 .../KNOX/0.5.0.2.2/package/scripts/knox.py         |   192 -
 .../KNOX/0.5.0.2.2/package/scripts/knox_gateway.py |   215 -
 .../KNOX/0.5.0.2.2/package/scripts/knox_ldap.py    |    59 -
 .../KNOX/0.5.0.2.2/package/scripts/params.py       |    29 -
 .../KNOX/0.5.0.2.2/package/scripts/params_linux.py |   570 -
 .../0.5.0.2.2/package/scripts/params_windows.py    |    71 -
 .../0.5.0.2.2/package/scripts/service_check.py     |    96 -
 .../0.5.0.2.2/package/scripts/setup_ranger_knox.py |   129 -
 .../0.5.0.2.2/package/scripts/status_params.py     |    59 -
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py      |   118 -
 .../package/templates/input.config-knox.json.j2    |    60 -
 .../package/templates/krb5JAASLogin.conf.j2        |    27 -
 .../KNOX/0.5.0.2.2/role_command_order.json         |     7 -
 .../KNOX/0.5.0.2.2/themes/credentials.json         |    38 -
 .../KNOX/0.5.0.2.2/themes/directories.json         |    88 -
 .../common-services/LOGSEARCH/0.5.0/alerts.json    |    77 -
 .../configuration/logfeeder-ambari-config.xml      |    38 -
 .../0.5.0/configuration/logfeeder-env.xml          |   187 -
 .../0.5.0/configuration/logfeeder-grok.xml         |    49 -
 .../logfeeder-kafka-output-config.xml              |    61 -
 .../0.5.0/configuration/logfeeder-log4j.xml        |    78 -
 .../configuration/logfeeder-output-config.xml      |    37 -
 .../0.5.0/configuration/logfeeder-properties.xml   |   145 -
 .../configuration/logfeeder-system_log-env.xml     |    61 -
 .../0.5.0/configuration/logsearch-admin-json.xml   |    54 -
 .../logsearch-audit_logs-solrconfig.xml            |    62 -
 .../0.5.0/configuration/logsearch-common-env.xml   |   124 -
 .../configuration/logsearch-common-properties.xml  |    23 -
 .../0.5.0/configuration/logsearch-env.xml          |   201 -
 .../0.5.0/configuration/logsearch-log4j.xml        |   121 -
 .../0.5.0/configuration/logsearch-properties.xml   |   310 -
 .../logsearch-service_logs-solrconfig.xml          |    62 -
 .../common-services/LOGSEARCH/0.5.0/kerberos.json  |    72 -
 .../common-services/LOGSEARCH/0.5.0/metainfo.xml   |   204 -
 .../common-services/LOGSEARCH/0.5.0/metrics.json   |    53 -
 .../0.5.0/package/alerts/alert_logfeeder.py        |    85 -
 .../LOGSEARCH/0.5.0/package/scripts/logfeeder.py   |    70 -
 .../LOGSEARCH/0.5.0/package/scripts/logsearch.py   |    71 -
 .../LOGSEARCH/0.5.0/package/scripts/params.py      |   429 -
 .../0.5.0/package/scripts/service_check.py         |    41 -
 .../0.5.0/package/scripts/setup_logfeeder.py       |   136 -
 .../0.5.0/package/scripts/setup_logsearch.py       |   162 -
 .../0.5.0/package/scripts/status_params.py         |    38 -
 .../package/templates/HadoopServiceConfig.json.j2  |   505 -
 .../0.5.0/package/templates/global.config.json.j2  |    27 -
 .../templates/input.config-logsearch.json.j2       |    55 -
 .../0.5.0/package/templates/logfeeder_jaas.conf.j2 |    26 -
 .../0.5.0/package/templates/logsearch_jaas.conf.j2 |    26 -
 .../0.5.0/properties/audit_logs-solrconfig.xml.j2  |  1889 --
 .../0.5.0/properties/input.config-ambari.json.j2   |   858 -
 .../0.5.0/properties/kafka-output.json.j2          |    34 -
 .../properties/logfeeder-default_grok_patterns.j2  |   148 -
 .../LOGSEARCH/0.5.0/properties/logfeeder-env.sh.j2 |    47 -
 .../0.5.0/properties/logfeeder-log4j.xml.j2        |    72 -
 .../0.5.0/properties/logsearch-admin.json.j2       |    25 -
 .../LOGSEARCH/0.5.0/properties/logsearch-env.sh.j2 |    51 -
 .../0.5.0/properties/logsearch-log4j.xml.j2        |    82 -
 .../0.5.0/properties/output.config.json.j2         |    59 -
 .../LOGSEARCH/0.5.0/properties/secure_log.json     |    31 -
 .../properties/service_logs-solrconfig.xml.j2      |  1889 --
 .../0.5.0/properties/system_messages.json          |    31 -
 .../LOGSEARCH/0.5.0/quicklinks/quicklinks.json     |    35 -
 .../LOGSEARCH/0.5.0/role_command_order.json        |     9 -
 .../LOGSEARCH/0.5.0/service_advisor.py             |   174 -
 .../LOGSEARCH/0.5.0/themes/credentials.json        |    42 -
 .../LOGSEARCH/0.5.0/themes/directories.json        |   137 -
 .../LOGSEARCH/0.5.0/themes/theme.json              |   480 -
 .../MAHOUT/1.0.0.2.3/configuration/mahout-env.xml  |    41 -
 .../1.0.0.2.3/configuration/mahout-log4j.xml       |    68 -
 .../common-services/MAHOUT/1.0.0.2.3/kerberos.json |    24 -
 .../common-services/MAHOUT/1.0.0.2.3/metainfo.xml  |    79 -
 .../MAHOUT/1.0.0.2.3/package/scripts/mahout.py     |    64 -
 .../1.0.0.2.3/package/scripts/mahout_client.py     |    53 -
 .../MAHOUT/1.0.0.2.3/package/scripts/params.py     |    97 -
 .../1.0.0.2.3/package/scripts/service_check.py     |    91 -
 .../MAHOUT/1.0.0.2.3/role_command_order.json       |     6 -
 .../common-services/OOZIE/4.0.0.2.0/alerts.json    |    45 -
 .../OOZIE/4.0.0.2.0/configuration/oozie-env.xml    |   245 -
 .../OOZIE/4.0.0.2.0/configuration/oozie-log4j.xml  |   125 -
 .../OOZIE/4.0.0.2.0/configuration/oozie-site.xml   |   342 -
 .../common-services/OOZIE/4.0.0.2.0/kerberos.json  |    72 -
 .../common-services/OOZIE/4.0.0.2.0/metainfo.xml   |   186 -
 .../package/alerts/alert_check_oozie_server.py     |   246 -
 .../OOZIE/4.0.0.2.0/package/files/oozieSmoke2.sh   |    84 -
 .../package/files/prepareOozieHdfsDirectories.sh   |    42 -
 .../OOZIE/4.0.0.2.0/package/files/wrap_ooziedb.sh  |    31 -
 .../package/scripts/check_oozie_server_status.py   |    38 -
 .../OOZIE/4.0.0.2.0/package/scripts/oozie.py       |   540 -
 .../4.0.0.2.0/package/scripts/oozie_client.py      |    74 -
 .../4.0.0.2.0/package/scripts/oozie_server.py      |   154 -
 .../package/scripts/oozie_server_upgrade.py        |   261 -
 .../4.0.0.2.0/package/scripts/oozie_service.py     |   192 -
 .../OOZIE/4.0.0.2.0/package/scripts/params.py      |    39 -
 .../4.0.0.2.0/package/scripts/params_linux.py      |   388 -
 .../4.0.0.2.0/package/scripts/params_windows.py    |    34 -
 .../4.0.0.2.0/package/scripts/service_check.py     |   140 -
 .../4.0.0.2.0/package/scripts/status_params.py     |    65 -
 .../4.0.0.2.0/package/templates/adminusers.txt.j2  |    28 -
 .../package/templates/input.config-oozie.json.j2   |    48 -
 .../package/templates/oozie-log4j.properties.j2    |    93 -
 .../4.0.0.2.0/package/templates/oozie.conf.j2      |    35 -
 .../package/templates/zkmigrator_jaas.conf.j2      |    26 -
 .../OOZIE/4.0.0.2.0/role_command_order.json        |     9 -
 .../OOZIE/4.0.0.2.0/themes/credentials.json        |    42 -
 .../OOZIE/4.0.0.2.0/themes/database.json           |   140 -
 .../OOZIE/4.0.0.2.0/themes/directories.json        |   127 -
 .../common-services/OOZIE/4.2.0.2.3/alerts.json    |    45 -
 .../OOZIE/4.2.0.2.3/configuration/oozie-env.xml    |   107 -
 .../OOZIE/4.2.0.2.3/configuration/oozie-log4j.xml  |   148 -
 .../OOZIE/4.2.0.2.3/configuration/oozie-site.xml   |   190 -
 .../common-services/OOZIE/4.2.0.2.3/kerberos.json  |    73 -
 .../common-services/OOZIE/4.2.0.2.3/metainfo.xml   |   187 -
 .../OOZIE/4.2.0.2.3/quicklinks/quicklinks.json     |    46 -
 .../OOZIE/4.2.0.2.5/configuration/oozie-site.xml   |    34 -
 .../common-services/OOZIE/4.2.0.2.5/metainfo.xml   |    27 -
 .../PIG/0.12.0.2.0/configuration/pig-env.xml       |    41 -
 .../PIG/0.12.0.2.0/configuration/pig-log4j.xml     |    65 -
 .../0.12.0.2.0/configuration/pig-properties.xml    |    93 -
 .../common-services/PIG/0.12.0.2.0/kerberos.json   |    18 -
 .../common-services/PIG/0.12.0.2.0/metainfo.xml    |    86 -
 .../PIG/0.12.0.2.0/package/files/pigSmoke.sh       |    18 -
 .../PIG/0.12.0.2.0/package/scripts/params.py       |    31 -
 .../PIG/0.12.0.2.0/package/scripts/params_linux.py |   104 -
 .../0.12.0.2.0/package/scripts/params_windows.py   |    45 -
 .../PIG/0.12.0.2.0/package/scripts/pig.py          |    86 -
 .../PIG/0.12.0.2.0/package/scripts/pig_client.py   |    66 -
 .../0.12.0.2.0/package/scripts/service_check.py    |   142 -
 .../PIG/0.12.0.2.0/role_command_order.json         |     6 -
 .../common-services/PXF/3.0.0/alerts.json          |    19 -
 .../PXF/3.0.0/configuration/pxf-profiles.xml       |   226 -
 .../3.0.0/configuration/pxf-public-classpath.xml   |    62 -
 .../PXF/3.0.0/configuration/pxf-site.xml           |    27 -
 .../common-services/PXF/3.0.0/kerberos.json        |    33 -
 .../common-services/PXF/3.0.0/metainfo.xml         |    89 -
 .../PXF/3.0.0/package/alerts/api_status.py         |   175 -
 .../PXF/3.0.0/package/scripts/params.py            |    94 -
 .../PXF/3.0.0/package/scripts/pxf.py               |   132 -
 .../PXF/3.0.0/package/scripts/pxf_constants.py     |    33 -
 .../PXF/3.0.0/package/scripts/pxf_utils.py         |    49 -
 .../PXF/3.0.0/package/scripts/service_check.py     |   421 -
 .../PXF/3.0.0/package/templates/pxf-env.j2         |    34 -
 .../common-services/PXF/3.0.0/service_advisor.py   |    93 -
 .../common-services/RANGER/0.4.0/alerts.json       |    74 -
 .../0.4.0/configuration/admin-properties.xml       |   303 -
 .../RANGER/0.4.0/configuration/ranger-env.xml      |   327 -
 .../RANGER/0.4.0/configuration/ranger-site.xml     |    69 -
 .../0.4.0/configuration/usersync-properties.xml    |   158 -
 .../common-services/RANGER/0.4.0/metainfo.xml      |   126 -
 .../alerts/alert_ranger_admin_passwd_check.py      |   195 -
 .../RANGER/0.4.0/package/scripts/params.py         |   454 -
 .../RANGER/0.4.0/package/scripts/ranger_admin.py   |   235 -
 .../RANGER/0.4.0/package/scripts/ranger_service.py |    69 -
 .../RANGER/0.4.0/package/scripts/ranger_tagsync.py |   152 -
 .../0.4.0/package/scripts/ranger_usersync.py       |   121 -
 .../RANGER/0.4.0/package/scripts/service_check.py  |    49 -
 .../RANGER/0.4.0/package/scripts/setup_ranger.py   |   153 -
 .../0.4.0/package/scripts/setup_ranger_xml.py      |   873 -
 .../RANGER/0.4.0/package/scripts/status_params.py  |    39 -
 .../package/templates/input.config-ranger.json.j2  |    79 -
 .../0.4.0/package/templates/ranger_admin_pam.j2    |    22 -
 .../0.4.0/package/templates/ranger_remote_pam.j2   |    22 -
 .../package/templates/ranger_solr_jaas_conf.j2     |    26 -
 .../RANGER/0.4.0/quicklinks/quicklinks.json        |    36 -
 .../RANGER/0.4.0/role_command_order.json           |     8 -
 .../RANGER/0.4.0/themes/theme_version_1.json       |   722 -
 .../common-services/RANGER/0.5.0/alerts.json       |    74 -
 .../0.5.0/configuration/admin-properties.xml       |   146 -
 .../0.5.0/configuration/ranger-admin-site.xml      |   563 -
 .../RANGER/0.5.0/configuration/ranger-env.xml      |   221 -
 .../RANGER/0.5.0/configuration/ranger-site.xml     |    58 -
 .../0.5.0/configuration/ranger-ugsync-site.xml     |   460 -
 .../0.5.0/configuration/usersync-properties.xml    |   108 -
 .../common-services/RANGER/0.5.0/metainfo.xml      |    50 -
 .../RANGER/0.5.0/quicklinks/quicklinks.json        |    40 -
 .../RANGER/0.5.0/role_command_order.json           |     6 -
 .../RANGER/0.5.0/themes/theme_version_2.json       |  1470 --
 .../common-services/RANGER/0.6.0/alerts.json       |    76 -
 .../RANGER/0.6.0/configuration/admin-log4j.xml     |   131 -
 .../0.6.0/configuration/admin-properties.xml       |    38 -
 .../0.6.0/configuration/ranger-admin-site.xml      |   343 -
 .../RANGER/0.6.0/configuration/ranger-env.xml      |   117 -
 .../0.6.0/configuration/ranger-tagsync-site.xml    |   186 -
 .../0.6.0/configuration/ranger-ugsync-site.xml     |   100 -
 .../tagsync-application-properties.xml             |    62 -
 .../RANGER/0.6.0/configuration/tagsync-log4j.xml   |    89 -
 .../RANGER/0.6.0/configuration/usersync-log4j.xml  |    88 -
 .../common-services/RANGER/0.6.0/kerberos.json     |   159 -
 .../common-services/RANGER/0.6.0/metainfo.xml      |   117 -
 .../RANGER/0.6.0/role_command_order.json           |     6 -
 .../RANGER/0.6.0/themes/theme_version_3.json       |   693 -
 .../0.7.0/configuration/atlas-tagsync-ssl.xml      |    72 -
 .../0.7.0/configuration/ranger-admin-site.xml      |    42 -
 .../RANGER/0.7.0/configuration/ranger-env.xml      |    83 -
 .../configuration/ranger-solr-configuration.xml    |    59 -
 .../configuration/ranger-tagsync-policymgr-ssl.xml |    72 -
 .../0.7.0/configuration/ranger-tagsync-site.xml    |    43 -
 .../0.7.0/configuration/ranger-ugsync-site.xml     |    82 -
 .../common-services/RANGER/0.7.0/metainfo.xml      |    41 -
 .../0.7.0/properties/ranger-solrconfig.xml.j2      |  1877 --
 .../RANGER/0.7.0/themes/theme_version_5.json       |    89 -
 .../RANGER_KMS/0.5.0.2.3/alerts.json               |    32 -
 .../0.5.0.2.3/configuration/dbks-site.xml          |   124 -
 .../RANGER_KMS/0.5.0.2.3/configuration/kms-env.xml |   124 -
 .../0.5.0.2.3/configuration/kms-log4j.xml          |   118 -
 .../0.5.0.2.3/configuration/kms-properties.xml     |   166 -
 .../0.5.0.2.3/configuration/kms-site.xml           |   107 -
 .../0.5.0.2.3/configuration/ranger-kms-audit.xml   |   150 -
 .../configuration/ranger-kms-policymgr-ssl.xml     |    66 -
 .../configuration/ranger-kms-security.xml          |    64 -
 .../0.5.0.2.3/configuration/ranger-kms-site.xml    |    58 -
 .../RANGER_KMS/0.5.0.2.3/kerberos.json             |    42 -
 .../RANGER_KMS/0.5.0.2.3/metainfo.xml              |    85 -
 .../RANGER_KMS/0.5.0.2.3/package/scripts/kms.py    |   700 -
 .../0.5.0.2.3/package/scripts/kms_server.py        |   124 -
 .../0.5.0.2.3/package/scripts/kms_service.py       |    58 -
 .../RANGER_KMS/0.5.0.2.3/package/scripts/params.py |   337 -
 .../0.5.0.2.3/package/scripts/service_check.py     |    41 -
 .../0.5.0.2.3/package/scripts/status_params.py     |    36 -
 .../templates/input.config-ranger-kms.json.j2      |    48 -
 .../RANGER_KMS/0.5.0.2.3/role_command_order.json   |     7 -
 .../scripts/accumulo_tracer.py => README.txt}      |    10 +-
 .../0.60.0.2.2/configuration/slider-client.xml     |    56 -
 .../SLIDER/0.60.0.2.2/configuration/slider-env.xml |    46 -
 .../0.60.0.2.2/configuration/slider-log4j.xml      |    92 -
 .../SLIDER/0.60.0.2.2/kerberos.json                |    18 -
 .../common-services/SLIDER/0.60.0.2.2/metainfo.xml |   109 -
 .../0.60.0.2.2/package/files/hbaseSmokeVerify.sh   |    34 -
 .../SLIDER/0.60.0.2.2/package/scripts/__init__.py  |    19 -
 .../SLIDER/0.60.0.2.2/package/scripts/params.py    |    54 -
 .../0.60.0.2.2/package/scripts/params_linux.py     |    83 -
 .../0.60.0.2.2/package/scripts/params_windows.py   |    45 -
 .../0.60.0.2.2/package/scripts/service_check.py    |    63 -
 .../SLIDER/0.60.0.2.2/package/scripts/slider.py    |    96 -
 .../0.60.0.2.2/package/scripts/slider_client.py    |    61 -
 .../package/templates/storm-slider-env.sh.j2       |    38 -
 .../common-services/SPARK/1.2.1/alerts.json        |    80 -
 .../SPARK/1.2.1/configuration/spark-defaults.xml   |   187 -
 .../SPARK/1.2.1/configuration/spark-env.xml        |   166 -
 .../configuration/spark-javaopts-properties.xml    |    31 -
 .../1.2.1/configuration/spark-log4j-properties.xml |    47 -
 .../configuration/spark-metrics-properties.xml     |   166 -
 .../common-services/SPARK/1.2.1/kerberos.json      |    55 -
 .../common-services/SPARK/1.2.1/metainfo.xml       |   194 -
 .../scripts/alerts/alert_spark_livy_port.py        |   148 -
 .../scripts/alerts/alert_spark_thrift_port.py      |   160 -
 .../1.2.1/package/scripts/job_history_server.py    |   102 -
 .../SPARK/1.2.1/package/scripts/livy_server.py     |   148 -
 .../SPARK/1.2.1/package/scripts/livy_service.py    |    50 -
 .../SPARK/1.2.1/package/scripts/params.py          |   286 -
 .../SPARK/1.2.1/package/scripts/service_check.py   |    61 -
 .../SPARK/1.2.1/package/scripts/setup_livy.py      |    87 -
 .../SPARK/1.2.1/package/scripts/setup_spark.py     |   145 -
 .../SPARK/1.2.1/package/scripts/spark_client.py    |    84 -
 .../SPARK/1.2.1/package/scripts/spark_service.py   |   122 -
 .../1.2.1/package/scripts/spark_thrift_server.py   |    86 -
 .../SPARK/1.2.1/package/scripts/status_params.py   |    45 -
 .../package/templates/input.config-spark.json.j2   |    66 -
 .../SPARK/1.2.1/quicklinks/quicklinks.json         |    28 -
 .../SPARK/1.2.1/role_command_order.json            |     6 -
 .../SPARK/1.2.1/themes/directories.json            |   148 -
 .../common-services/SPARK/1.3.1/metainfo.xml       |   142 -
 .../common-services/SPARK/1.4.1/kerberos.json      |    74 -
 .../common-services/SPARK/1.4.1/metainfo.xml       |   100 -
 .../configuration/spark-hive-site-override.xml     |    59 -
 .../1.5.2/configuration/spark-thrift-sparkconf.xml |   136 -
 .../common-services/SPARK/1.5.2/metainfo.xml       |    43 -
 .../SPARK/1.5.2/role_command_order.json            |     6 -
 .../SPARK/1.6.0/configuration/spark-defaults.xml   |    59 -
 .../configuration/spark-thrift-fairscheduler.xml   |    36 -
 .../1.6.0/configuration/spark-thrift-sparkconf.xml |   193 -
 .../common-services/SPARK/1.6.0/metainfo.xml       |    45 -
 .../SPARK/1.6.0/role_command_order.json            |     8 -
 .../common-services/SPARK2/2.0.0/alerts.json       |    80 -
 .../SPARK2/2.0.0/configuration/spark2-defaults.xml |   117 -
 .../SPARK2/2.0.0/configuration/spark2-env.xml      |   159 -
 .../configuration/spark2-hive-site-override.xml    |    59 -
 .../configuration/spark2-log4j-properties.xml      |    46 -
 .../configuration/spark2-metrics-properties.xml    |   165 -
 .../configuration/spark2-thrift-fairscheduler.xml  |    36 -
 .../configuration/spark2-thrift-sparkconf.xml      |   162 -
 .../common-services/SPARK2/2.0.0/kerberos.json     |    74 -
 .../common-services/SPARK2/2.0.0/metainfo.xml      |   239 -
 .../scripts/alerts/alert_spark2_livy_port.py       |   148 -
 .../scripts/alerts/alert_spark2_thrift_port.py     |   161 -
 .../2.0.0/package/scripts/job_history_server.py    |   102 -
 .../SPARK2/2.0.0/package/scripts/livy2_server.py   |   148 -
 .../SPARK2/2.0.0/package/scripts/livy2_service.py  |    50 -
 .../SPARK2/2.0.0/package/scripts/params.py         |   271 -
 .../SPARK2/2.0.0/package/scripts/service_check.py  |    63 -
 .../SPARK2/2.0.0/package/scripts/setup_livy2.py    |    90 -
 .../SPARK2/2.0.0/package/scripts/setup_spark.py    |   117 -
 .../SPARK2/2.0.0/package/scripts/spark_client.py   |    56 -
 .../SPARK2/2.0.0/package/scripts/spark_service.py  |   145 -
 .../2.0.0/package/scripts/spark_thrift_server.py   |    85 -
 .../SPARK2/2.0.0/package/scripts/status_params.py  |    45 -
 .../package/templates/input.config-spark2.json.j2  |    66 -
 .../SPARK2/2.0.0/quicklinks/quicklinks.json        |    28 -
 .../SPARK2/2.0.0/role_command_order.json           |     6 -
 .../SPARK2/2.0.0/themes/directories.json           |   148 -
 .../SQOOP/1.4.4.2.0/configuration/sqoop-env.xml    |    93 -
 .../SQOOP/1.4.4.2.0/configuration/sqoop-site.xml   |    49 -
 .../common-services/SQOOP/1.4.4.2.0/metainfo.xml   |    97 -
 .../SQOOP/1.4.4.2.0/package/scripts/__init__.py    |    19 -
 .../SQOOP/1.4.4.2.0/package/scripts/params.py      |    27 -
 .../1.4.4.2.0/package/scripts/params_linux.py      |   137 -
 .../1.4.4.2.0/package/scripts/params_windows.py    |    30 -
 .../1.4.4.2.0/package/scripts/service_check.py     |    59 -
 .../SQOOP/1.4.4.2.0/package/scripts/sqoop.py       |   124 -
 .../1.4.4.2.0/package/scripts/sqoop_client.py      |    61 -
 .../SQOOP/1.4.4.2.0/role_command_order.json        |     6 -
 .../0.10.0/configuration/ranger-storm-audit.xml    |   177 -
 .../ranger-storm-plugin-properties.xml             |   147 -
 .../configuration/ranger-storm-policymgr-ssl.xml   |    66 -
 .../0.10.0/configuration/ranger-storm-security.xml |    64 -
 .../0.10.0/configuration/storm-cluster-log4j.xml   |   131 -
 .../STORM/0.10.0/configuration/storm-env.xml       |    75 -
 .../STORM/0.10.0/configuration/storm-site.xml      |    72 -
 .../0.10.0/configuration/storm-worker-log4j.xml    |   174 -
 .../common-services/STORM/0.10.0/metainfo.xml      |    57 -
 .../common-services/STORM/0.10.0/metrics.json      |  1202 -
 .../STORM/0.10.0/role_command_order.json           |     8 -
 .../common-services/STORM/0.10.0/widgets.json      |   127 -
 .../common-services/STORM/0.9.1/alerts.json        |   145 -
 .../STORM/0.9.1/configuration/storm-env.xml        |   165 -
 .../STORM/0.9.1/configuration/storm-site.xml       |   837 -
 .../common-services/STORM/0.9.1/kerberos.json      |   107 -
 .../common-services/STORM/0.9.1/metainfo.xml       |   186 -
 .../common-services/STORM/0.9.1/metrics.json       |   858 -
 .../package/alerts/check_supervisor_process_win.py |    50 -
 .../STORM/0.9.1/package/files/wordCount.jar        |   Bin 690588 -> 0 bytes
 .../STORM/0.9.1/package/scripts/drpc_server.py     |    86 -
 .../STORM/0.9.1/package/scripts/nimbus.py          |   115 -
 .../STORM/0.9.1/package/scripts/nimbus_prod.py     |    75 -
 .../STORM/0.9.1/package/scripts/pacemaker.py       |    85 -
 .../STORM/0.9.1/package/scripts/params.py          |    28 -
 .../STORM/0.9.1/package/scripts/params_linux.py    |   426 -
 .../STORM/0.9.1/package/scripts/params_windows.py  |    54 -
 .../STORM/0.9.1/package/scripts/rest_api.py        |    82 -
 .../STORM/0.9.1/package/scripts/service.py         |    95 -
 .../STORM/0.9.1/package/scripts/service_check.py   |    79 -
 .../0.9.1/package/scripts/setup_ranger_storm.py    |   137 -
 .../STORM/0.9.1/package/scripts/status_params.py   |    83 -
 .../STORM/0.9.1/package/scripts/storm.py           |   193 -
 .../STORM/0.9.1/package/scripts/storm_upgrade.py   |   177 -
 .../0.9.1/package/scripts/storm_yaml_utils.py      |    56 -
 .../STORM/0.9.1/package/scripts/supervisor.py      |   111 -
 .../STORM/0.9.1/package/scripts/supervisor_prod.py |    78 -
 .../0.9.1/package/scripts/supervisord_service.py   |    34 -
 .../STORM/0.9.1/package/scripts/ui_server.py       |   132 -
 .../0.9.1/package/templates/client_jaas.conf.j2    |    33 -
 .../STORM/0.9.1/package/templates/config.yaml.j2   |    75 -
 .../package/templates/input.config-storm.json.j2   |   147 -
 .../package/templates/storm-metrics2.properties.j2 |    35 -
 .../STORM/0.9.1/package/templates/storm.conf.j2    |    35 -
 .../0.9.1/package/templates/storm_jaas.conf.j2     |    75 -
 .../0.9.1/package/templates/worker-launcher.cfg.j2 |    19 -
 .../STORM/0.9.1/quicklinks/quicklinks.json         |    45 -
 .../STORM/0.9.1/role_command_order.json            |    13 -
 .../STORM/0.9.1/themes/directories.json            |   147 -
 .../ranger-storm-plugin-properties.xml             |   261 -
 .../STORM/0.9.3/configuration/storm-env.xml        |    44 -
 .../STORM/0.9.3/configuration/storm-site.xml       |   125 -
 .../common-services/STORM/0.9.3/metainfo.xml       |    53 -
 .../common-services/STORM/0.9.3/metrics.json       |  1203 -
 .../STORM/0.9.3/role_command_order.json            |     7 -
 .../1.0.1/configuration/ranger-storm-audit.xml     |    57 -
 .../storm-atlas-application.properties.xml         |    31 -
 .../1.0.1/configuration/storm-cluster-log4j.xml    |   132 -
 .../STORM/1.0.1/configuration/storm-site.xml       |   174 -
 .../1.0.1/configuration/storm-worker-log4j.xml     |   189 -
 .../common-services/STORM/1.0.1/kerberos.json      |   139 -
 .../common-services/STORM/1.0.1/metainfo.xml       |    42 -
 .../STORM/1.1.0/configuration/storm-site.xml       |    48 -
 .../common-services/STORM/1.1.0/kerberos.json      |   139 -
 .../common-services/STORM/1.1.0/metainfo.xml       |    42 -
 .../SUPERSET/0.15.0/configuration/superset-env.xml |   136 -
 .../SUPERSET/0.15.0/configuration/superset.xml     |   178 -
 .../common-services/SUPERSET/0.15.0/metainfo.xml   |    88 -
 .../SUPERSET/0.15.0/package/scripts/params.py      |    89 -
 .../0.15.0/package/scripts/service_check.py        |    37 -
 .../0.15.0/package/scripts/status_params.py        |    25 -
 .../SUPERSET/0.15.0/package/scripts/superset.py    |   167 -
 .../SUPERSET/0.15.0/package/templates/superset.sh  |    95 -
 .../SUPERSET/0.15.0/quicklinks/quicklinks.json     |    24 -
 .../SUPERSET/0.15.0/role_command_order.json        |     7 -
 .../SUPERSET/0.15.0/themes/theme.json              |   120 -
 .../TEZ/0.4.0.2.1/configuration/tez-env.xml        |    65 -
 .../TEZ/0.4.0.2.1/configuration/tez-site.xml       |   275 -
 .../common-services/TEZ/0.4.0.2.1/kerberos.json    |    18 -
 .../common-services/TEZ/0.4.0.2.1/metainfo.xml     |   110 -
 .../TEZ/0.4.0.2.1/package/scripts/params.py        |    29 -
 .../TEZ/0.4.0.2.1/package/scripts/params_linux.py  |   118 -
 .../0.4.0.2.1/package/scripts/params_windows.py    |    54 -
 .../TEZ/0.4.0.2.1/package/scripts/pre_upgrade.py   |    59 -
 .../TEZ/0.4.0.2.1/package/scripts/service_check.py |   111 -
 .../TEZ/0.4.0.2.1/package/scripts/tez.py           |    79 -
 .../TEZ/0.4.0.2.1/package/scripts/tez_client.py    |   128 -
 .../TEZ/0.4.0.2.1/role_command_order.json          |     6 -
 .../TEZ/0.4.0.2.1/themes/directories.json          |    69 -
 .../YARN/2.1.0.2.0/MAPREDUCE2_metrics.json         |  2596 --
 .../YARN/2.1.0.2.0/YARN_metrics.json               |  3486 ---
 .../YARN/2.1.0.2.0/YARN_widgets.json               |   611 -
 .../common-services/YARN/2.1.0.2.0/alerts.json     |   392 -
 .../2.1.0.2.0/configuration-mapred/mapred-env.xml  |   111 -
 .../2.1.0.2.0/configuration-mapred/mapred-site.xml |   484 -
 .../2.1.0.2.0/configuration/capacity-scheduler.xml |   130 -
 .../2.1.0.2.0/configuration/container-executor.xml |    36 -
 .../YARN/2.1.0.2.0/configuration/yarn-env.xml      |   271 -
 .../YARN/2.1.0.2.0/configuration/yarn-log4j.xml    |   115 -
 .../YARN/2.1.0.2.0/configuration/yarn-site.xml     |   427 -
 .../common-services/YARN/2.1.0.2.0/kerberos.json   |   225 -
 .../common-services/YARN/2.1.0.2.0/metainfo.xml    |   324 -
 .../package/alerts/alert_nodemanager_health.py     |   209 -
 .../package/alerts/alert_nodemanagers_summary.py   |   219 -
 .../files/validateYarnComponentStatusWindows.py    |   161 -
 .../YARN/2.1.0.2.0/package/scripts/__init__.py     |    20 -
 .../package/scripts/application_timeline_server.py |    97 -
 .../2.1.0.2.0/package/scripts/historyserver.py     |   132 -
 .../YARN/2.1.0.2.0/package/scripts/install_jars.py |    99 -
 .../package/scripts/mapred_service_check.py        |   172 -
 .../2.1.0.2.0/package/scripts/mapreduce2_client.py |    91 -
 .../YARN/2.1.0.2.0/package/scripts/nodemanager.py  |   102 -
 .../package/scripts/nodemanager_upgrade.py         |    74 -
 .../YARN/2.1.0.2.0/package/scripts/params.py       |    32 -
 .../YARN/2.1.0.2.0/package/scripts/params_linux.py |   597 -
 .../2.1.0.2.0/package/scripts/params_windows.py    |    70 -
 .../2.1.0.2.0/package/scripts/resourcemanager.py   |   261 -
 .../YARN/2.1.0.2.0/package/scripts/service.py      |   119 -
 .../2.1.0.2.0/package/scripts/service_check.py     |   181 -
 .../2.1.0.2.0/package/scripts/setup_ranger_yarn.py |    71 -
 .../2.1.0.2.0/package/scripts/status_params.py     |    50 -
 .../YARN/2.1.0.2.0/package/scripts/yarn.py         |   540 -
 .../YARN/2.1.0.2.0/package/scripts/yarn_client.py  |    63 -
 .../package/templates/exclude_hosts_list.j2        |    21 -
 .../package/templates/include_hosts_list.j2        |    21 -
 .../templates/input.config-mapreduce2.json.j2      |    48 -
 .../package/templates/mapred_jaas.conf.j2          |    28 -
 .../2.1.0.2.0/package/templates/mapreduce.conf.j2  |    35 -
 .../package/templates/taskcontroller.cfg.j2        |    38 -
 .../YARN/2.1.0.2.0/package/templates/yarn.conf.j2  |    35 -
 .../package/templates/yarn_ats_jaas.conf.j2        |    27 -
 .../2.1.0.2.0/package/templates/yarn_jaas.conf.j2  |    36 -
 .../package/templates/yarn_nm_jaas.conf.j2         |    27 -
 .../2.1.0.2.0/properties/container-executor.cfg.j2 |    40 -
 .../YARN/2.1.0.2.0/role_command_order.json         |    21 -
 .../YARN/2.1.0.2.0/themes-mapred/directories.json  |   137 -
 .../YARN/2.1.0.2.0/themes/directories.json         |   177 -
 .../common-services/ZEPPELIN/0.6.0/alerts.json     |    18 -
 .../0.6.0/configuration/zeppelin-config.xml        |   220 -
 .../ZEPPELIN/0.6.0/configuration/zeppelin-env.xml  |   196 -
 .../configuration/zeppelin-log4j-properties.xml    |    37 -
 .../0.6.0/configuration/zeppelin-shiro-ini.xml     |    97 -
 .../common-services/ZEPPELIN/0.6.0/kerberos.json   |    52 -
 .../common-services/ZEPPELIN/0.6.0/metainfo.xml    |   103 -
 .../package/scripts/alerts/alert_check_zeppelin.py |    48 -
 .../package/scripts/interpreter_json_template.py   |   361 -
 .../0.6.0/package/scripts/livy2_config_template.py |   107 -
 .../ZEPPELIN/0.6.0/package/scripts/master.py       |   522 -
 .../ZEPPELIN/0.6.0/package/scripts/params.py       |   259 -
 .../0.6.0/package/scripts/service_check.py         |    39 -
 .../package/scripts/spark2_config_template.py      |    84 -
 .../0.6.0/package/scripts/status_params.py         |    29 -
 .../templates/input.config-zeppelin.json.j2        |    48 -
 .../ZEPPELIN/0.6.0/quicklinks/quicklinks.json      |    35 -
 .../ZEPPELIN/0.6.0/role_command_order.json         |     7 -
 .../common-services/ZEPPELIN/0.7.0/alerts.json     |    18 -
 .../0.7.0/configuration/zeppelin-config.xml        |   220 -
 .../ZEPPELIN/0.7.0/configuration/zeppelin-env.xml  |   196 -
 .../configuration/zeppelin-log4j-properties.xml    |    37 -
 .../0.7.0/configuration/zeppelin-shiro-ini.xml     |    97 -
 .../common-services/ZEPPELIN/0.7.0/kerberos.json   |    52 -
 .../common-services/ZEPPELIN/0.7.0/metainfo.xml    |   103 -
 .../package/scripts/alerts/alert_check_zeppelin.py |    48 -
 .../package/scripts/interpreter_json_template.py   |   518 -
 .../ZEPPELIN/0.7.0/package/scripts/master.py       |   664 -
 .../ZEPPELIN/0.7.0/package/scripts/params.py       |   273 -
 .../0.7.0/package/scripts/service_check.py         |    39 -
 .../0.7.0/package/scripts/status_params.py         |    29 -
 .../templates/input.config-zeppelin.json.j2        |    48 -
 .../ZEPPELIN/0.7.0/quicklinks/quicklinks.json      |    35 -
 .../ZEPPELIN/0.7.0/role_command_order.json         |     7 -
 .../ZEPPELIN/0.7.0/service_advisor.py              |   209 -
 .../common-services/ZOOKEEPER/3.4.5/alerts.json    |    78 -
 .../ZOOKEEPER/3.4.5/configuration/zoo.cfg.xml      |    93 -
 .../3.4.5/configuration/zookeeper-env.xml          |   113 -
 .../3.4.5/configuration/zookeeper-log4j.xml        |   125 -
 .../common-services/ZOOKEEPER/3.4.5/kerberos.json  |    40 -
 .../common-services/ZOOKEEPER/3.4.5/metainfo.xml   |   106 -
 .../ZOOKEEPER/3.4.5/package/files/zkEnv.sh         |    96 -
 .../ZOOKEEPER/3.4.5/package/files/zkServer.sh      |   120 -
 .../ZOOKEEPER/3.4.5/package/files/zkService.sh     |    26 -
 .../ZOOKEEPER/3.4.5/package/files/zkSmoke.sh       |    93 -
 .../ZOOKEEPER/3.4.5/package/scripts/__init__.py    |    20 -
 .../ZOOKEEPER/3.4.5/package/scripts/params.py      |    27 -
 .../3.4.5/package/scripts/params_linux.py          |   111 -
 .../3.4.5/package/scripts/params_windows.py        |    69 -
 .../3.4.5/package/scripts/service_check.py         |    76 -
 .../3.4.5/package/scripts/status_params.py         |    60 -
 .../ZOOKEEPER/3.4.5/package/scripts/zookeeper.py   |   158 -
 .../3.4.5/package/scripts/zookeeper_client.py      |    81 -
 .../3.4.5/package/scripts/zookeeper_server.py      |   138 -
 .../3.4.5/package/scripts/zookeeper_service.py     |    72 -
 .../3.4.5/package/templates/configuration.xsl.j2   |    42 -
 .../templates/input.config-zookeeper.json.j2       |    46 -
 .../ZOOKEEPER/3.4.5/package/templates/zoo.cfg.j2   |    53 -
 .../templates/zookeeper_client_jaas.conf.j2        |    23 -
 .../3.4.5/package/templates/zookeeper_jaas.conf.j2 |    26 -
 .../ZOOKEEPER/3.4.5/role_command_order.json        |     9 -
 .../ZOOKEEPER/3.4.5/themes/directories.json        |   117 -
 .../common-services/ZOOKEEPER/3.4.6/metainfo.xml   |    51 -
 .../common-services/ZOOKEEPER/3.4.9/metainfo.xml   |    51 -
 .../ZOOKEEPER/3.4.9/service_advisor.py             |   169 -
 .../BIGTOP/0.8/blueprints/multinode-default.json   |   174 -
 .../BIGTOP/0.8/blueprints/singlenode-default.json  |   128 -
 .../BIGTOP/0.8/configuration/cluster-env.xml       |    75 -
 .../BIGTOP/0.8/hooks/after-INSTALL/scripts/hook.py |    35 -
 .../0.8/hooks/after-INSTALL/scripts/params.py      |    75 -
 .../after-INSTALL/scripts/shared_initialization.py |    38 -
 .../hooks/before-ANY/files/changeToSecureUid.sh    |    50 -
 .../BIGTOP/0.8/hooks/before-ANY/scripts/hook.py    |    35 -
 .../BIGTOP/0.8/hooks/before-ANY/scripts/params.py  |   134 -
 .../before-ANY/scripts/shared_initialization.py    |   114 -
 .../0.8/hooks/before-INSTALL/scripts/hook.py       |    38 -
 .../0.8/hooks/before-INSTALL/scripts/params.py     |   120 -
 .../before-INSTALL/scripts/repo_initialization.py  |    57 -
 .../scripts/shared_initialization.py               |    66 -
 .../before-INSTALL/templates/repo_suse_rhel.j2     |     7 -
 .../hooks/before-INSTALL/templates/repo_ubuntu.j2  |     1 -
 .../0.8/hooks/before-RESTART/scripts/hook.py       |    29 -
 .../0.8/hooks/before-START/files/checkForFormat.sh |    65 -
 .../hooks/before-START/files/task-log4j.properties |   134 -
 .../BIGTOP/0.8/hooks/before-START/scripts/hook.py  |    37 -
 .../0.8/hooks/before-START/scripts/params.py       |   158 -
 .../before-START/scripts/shared_initialization.py  |   163 -
 .../templates/commons-logging.properties.j2        |    43 -
 .../before-START/templates/exclude_hosts_list.j2   |    21 -
 .../templates/hadoop-metrics2.properties.j2        |    65 -
 .../before-START/templates/health_check-v2.j2      |    81 -
 .../hooks/before-START/templates/health_check.j2   |   109 -
 .../before-START/templates/include_hosts_list.j2   |    21 -
 .../main/resources/stacks/BIGTOP/0.8/metainfo.xml  |    22 -
 .../resources/stacks/BIGTOP/0.8/repos/repoinfo.xml |    37 -
 .../stacks/BIGTOP/0.8/role_command_order.json      |    64 -
 .../stacks/BIGTOP/0.8/services/FLUME/alerts.json   |    27 -
 .../services/FLUME/configuration/flume-conf.xml    |    34 -
 .../0.8/services/FLUME/configuration/flume-env.xml |    88 -
 .../services/FLUME/configuration/flume-log4j.xml   |    34 -
 .../stacks/BIGTOP/0.8/services/FLUME/metainfo.xml  |    69 -
 .../stacks/BIGTOP/0.8/services/FLUME/metrics.json  |   720 -
 .../package/files/alert_flume_agent_status.py      |   106 -
 .../0.8/services/FLUME/package/scripts/flume.py    |   255 -
 .../services/FLUME/package/scripts/flume_check.py  |    40 -
 .../FLUME/package/scripts/flume_handler.py         |    79 -
 .../0.8/services/FLUME/package/scripts/params.py   |    70 -
 .../services/FLUME/package/templates/flume.conf.j2 |    24 -
 .../FLUME/package/templates/log4j.properties.j2    |    67 -
 .../stacks/BIGTOP/0.8/services/GANGLIA/alerts.json |   137 -
 .../services/GANGLIA/configuration/ganglia-env.xml |    85 -
 .../BIGTOP/0.8/services/GANGLIA/metainfo.xml       |   127 -
 .../services/GANGLIA/package/files/checkGmetad.sh  |    37 -
 .../services/GANGLIA/package/files/checkGmond.sh   |    62 -
 .../GANGLIA/package/files/checkRrdcached.sh        |    34 -
 .../0.8/services/GANGLIA/package/files/gmetad.init |    73 -
 .../services/GANGLIA/package/files/gmetadLib.sh    |   204 -
 .../0.8/services/GANGLIA/package/files/gmond.init  |    73 -
 .../0.8/services/GANGLIA/package/files/gmondLib.sh |   538 -
 .../services/GANGLIA/package/files/rrdcachedLib.sh |    47 -
 .../services/GANGLIA/package/files/setupGanglia.sh |   141 -
 .../services/GANGLIA/package/files/startGmetad.sh  |    68 -
 .../services/GANGLIA/package/files/startGmond.sh   |    85 -
 .../GANGLIA/package/files/startRrdcached.sh        |    79 -
 .../services/GANGLIA/package/files/stopGmetad.sh   |    43 -
 .../services/GANGLIA/package/files/stopGmond.sh    |    54 -
 .../GANGLIA/package/files/stopRrdcached.sh         |    41 -
 .../GANGLIA/package/files/teardownGanglia.sh       |    28 -
 .../services/GANGLIA/package/scripts/functions.py  |    31 -
 .../services/GANGLIA/package/scripts/ganglia.py    |    97 -
 .../GANGLIA/package/scripts/ganglia_monitor.py     |   236 -
 .../package/scripts/ganglia_monitor_service.py     |    27 -
 .../GANGLIA/package/scripts/ganglia_server.py      |   119 -
 .../package/scripts/ganglia_server_service.py      |    27 -
 .../0.8/services/GANGLIA/package/scripts/params.py |   160 -
 .../GANGLIA/package/scripts/status_params.py       |    25 -
 .../GANGLIA/package/templates/ganglia.conf.j2      |    34 -
 .../package/templates/gangliaClusters.conf.j2      |    43 -
 .../GANGLIA/package/templates/gangliaEnv.sh.j2     |    46 -
 .../GANGLIA/package/templates/gangliaLib.sh.j2     |    85 -
 .../services/GANGLIA/package/templates/rrd.py.j2   |   361 -
 .../stacks/BIGTOP/0.8/services/HBASE/alerts.json   |   122 -
 .../0.8/services/HBASE/configuration/hbase-env.xml |   159 -
 .../services/HBASE/configuration/hbase-log4j.xml   |   144 -
 .../services/HBASE/configuration/hbase-policy.xml  |    53 -
 .../services/HBASE/configuration/hbase-site.xml    |   358 -
 .../stacks/BIGTOP/0.8/services/HBASE/metainfo.xml  |   151 -
 .../stacks/BIGTOP/0.8/services/HBASE/metrics.json  | 13655 ----------
 .../HBASE/package/files/draining_servers.rb        |   164 -
 .../HBASE/package/files/hbaseSmokeVerify.sh        |    34 -
 .../0.8/services/HBASE/package/scripts/__init__.py |    19 -
 .../services/HBASE/package/scripts/functions.py    |    40 -
 .../0.8/services/HBASE/package/scripts/hbase.py    |   144 -
 .../services/HBASE/package/scripts/hbase_client.py |    43 -
 .../HBASE/package/scripts/hbase_decommission.py    |    74 -
 .../services/HBASE/package/scripts/hbase_master.py |    70 -
 .../HBASE/package/scripts/hbase_regionserver.py    |    66 -
 .../HBASE/package/scripts/hbase_service.py         |    51 -
 .../0.8/services/HBASE/package/scripts/params.py   |   137 -
 .../HBASE/package/scripts/service_check.py         |    79 -
 .../HBASE/package/scripts/status_params.py         |    26 -
 ...oop-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    81 -
 .../hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 |    80 -
 .../HBASE/package/templates/hbase-smoke.sh.j2      |    44 -
 .../package/templates/hbase_client_jaas.conf.j2    |    23 -
 .../package/templates/hbase_grant_permissions.j2   |    39 -
 .../package/templates/hbase_master_jaas.conf.j2    |    26 -
 .../templates/hbase_regionserver_jaas.conf.j2      |    26 -
 .../HBASE/package/templates/regionservers.j2       |    20 -
 .../stacks/BIGTOP/0.8/services/HDFS/alerts.json    |   637 -
 .../0.8/services/HDFS/configuration/core-site.xml  |   175 -
 .../0.8/services/HDFS/configuration/hadoop-env.xml |   228 -
 .../services/HDFS/configuration/hadoop-policy.xml  |   130 -
 .../0.8/services/HDFS/configuration/hdfs-log4j.xml |   202 -
 .../0.8/services/HDFS/configuration/hdfs-site.xml  |   424 -
 .../stacks/BIGTOP/0.8/services/HDFS/metainfo.xml   |   259 -
 .../stacks/BIGTOP/0.8/services/HDFS/metrics.json   |  7860 ------
 .../HDFS/package/files/alert_checkpoint_time.py    |   168 -
 .../HDFS/package/files/alert_ha_namenode_health.py |   253 -
 .../services/HDFS/package/files/checkForFormat.sh  |    70 -
 .../0.8/services/HDFS/package/files/checkWebUI.py  |    53 -
 .../scripts/balancer-emulator/balancer-err.log     |  1032 -
 .../package/scripts/balancer-emulator/balancer.log |    29 -
 .../scripts/balancer-emulator/hdfs-command.py      |    45 -
 .../0.8/services/HDFS/package/scripts/datanode.py  |    59 -
 .../0.8/services/HDFS/package/scripts/hdfs.py      |    80 -
 .../services/HDFS/package/scripts/hdfs_client.py   |    53 -
 .../services/HDFS/package/scripts/hdfs_datanode.py |    56 -
 .../services/HDFS/package/scripts/hdfs_namenode.py |   170 -
 .../HDFS/package/scripts/hdfs_rebalance.py         |   130 -
 .../HDFS/package/scripts/hdfs_snamenode.py         |    51 -
 .../services/HDFS/package/scripts/journalnode.py   |    73 -
 .../0.8/services/HDFS/package/scripts/namenode.py  |   134 -
 .../0.8/services/HDFS/package/scripts/params.py    |   249 -
 .../services/HDFS/package/scripts/service_check.py |   120 -
 .../0.8/services/HDFS/package/scripts/snamenode.py |    65 -
 .../services/HDFS/package/scripts/status_params.py |    31 -
 .../0.8/services/HDFS/package/scripts/utils.py     |   152 -
 .../services/HDFS/package/scripts/zkfc_slave.py    |    64 -
 .../HDFS/package/templates/exclude_hosts_list.j2   |    21 -
 .../services/HDFS/package/templates/hdfs.conf.j2   |    35 -
 .../HDFS/package/templates/include_hosts_list.j2   |    21 -
 .../0.8/services/HDFS/package/templates/slaves.j2  |    21 -
 .../stacks/BIGTOP/0.8/services/HIVE/alerts.json    |    46 -
 .../0.8/services/HIVE/configuration/hcat-env.xml   |    59 -
 .../0.8/services/HIVE/configuration/hive-env.xml   |   163 -
 .../HIVE/configuration/hive-exec-log4j.xml         |   112 -
 .../0.8/services/HIVE/configuration/hive-log4j.xml |   121 -
 .../0.8/services/HIVE/configuration/hive-site.xml  |   542 -
 .../services/HIVE/configuration/webhcat-env.xml    |    56 -
 .../services/HIVE/configuration/webhcat-site.xml   |   134 -
 .../services/HIVE/etc/hive-schema-0.12.0.mysql.sql |   777 -
 .../HIVE/etc/hive-schema-0.12.0.oracle.sql         |   718 -
 .../HIVE/etc/hive-schema-0.12.0.postgres.sql       |  1406 -
 .../stacks/BIGTOP/0.8/services/HIVE/metainfo.xml   |   288 -
 .../services/HIVE/package/files/addMysqlUser.sh    |    41 -
 .../HIVE/package/files/addPostgreSQLUser.sh        |    44 -
 .../HIVE/package/files/alert_hive_thrift_port.py   |   169 -
 .../0.8/services/HIVE/package/files/hcatSmoke.sh   |    36 -
 .../0.8/services/HIVE/package/files/hiveSmoke.sh   |    24 -
 .../services/HIVE/package/files/hiveserver2.sql    |    23 -
 .../HIVE/package/files/hiveserver2Smoke.sh         |    32 -
 .../0.8/services/HIVE/package/files/pigSmoke.sh    |    18 -
 .../services/HIVE/package/files/startMetastore.sh  |    23 -
 .../services/HIVE/package/files/templetonSmoke.sh  |    96 -
 .../0.8/services/HIVE/package/scripts/__init__.py  |    19 -
 .../0.8/services/HIVE/package/scripts/hcat.py      |    58 -
 .../services/HIVE/package/scripts/hcat_client.py   |    43 -
 .../HIVE/package/scripts/hcat_service_check.py     |    80 -
 .../0.8/services/HIVE/package/scripts/hive.py      |   216 -
 .../services/HIVE/package/scripts/hive_client.py   |    42 -
 .../HIVE/package/scripts/hive_metastore.py         |    64 -
 .../services/HIVE/package/scripts/hive_server.py   |    66 -
 .../services/HIVE/package/scripts/hive_service.py  |   106 -
 .../services/HIVE/package/scripts/install_jars.py  |   108 -
 .../services/HIVE/package/scripts/mysql_server.py  |    70 -
 .../services/HIVE/package/scripts/mysql_service.py |    49 -
 .../0.8/services/HIVE/package/scripts/params.py    |   284 -
 .../HIVE/package/scripts/postgresql_server.py      |   113 -
 .../HIVE/package/scripts/postgresql_service.py     |    44 -
 .../services/HIVE/package/scripts/service_check.py |    46 -
 .../services/HIVE/package/scripts/status_params.py |    38 -
 .../0.8/services/HIVE/package/scripts/webhcat.py   |   131 -
 .../HIVE/package/scripts/webhcat_server.py         |    53 -
 .../HIVE/package/scripts/webhcat_service.py        |    40 -
 .../HIVE/package/scripts/webhcat_service_check.py  |    41 -
 .../HIVE/package/templates/startHiveserver2.sh.j2  |    29 -
 .../stacks/BIGTOP/0.8/services/OOZIE/alerts.json   |    43 -
 .../0.8/services/OOZIE/configuration/oozie-env.xml |   142 -
 .../services/OOZIE/configuration/oozie-log4j.xml   |    98 -
 .../services/OOZIE/configuration/oozie-site.xml    |   324 -
 .../stacks/BIGTOP/0.8/services/OOZIE/metainfo.xml  |   156 -
 .../package/files/alert_check_oozie_server.py      |   145 -
 .../services/OOZIE/package/files/oozieSmoke2.sh    |   112 -
 .../services/OOZIE/package/files/wrap_ooziedb.sh   |    31 -
 .../0.8/services/OOZIE/package/scripts/oozie.py    |   182 -
 .../services/OOZIE/package/scripts/oozie_client.py |    45 -
 .../services/OOZIE/package/scripts/oozie_server.py |    58 -
 .../OOZIE/package/scripts/oozie_service.py         |    74 -
 .../0.8/services/OOZIE/package/scripts/params.py   |   163 -
 .../OOZIE/package/scripts/service_check.py         |    60 -
 .../OOZIE/package/scripts/status_params.py         |    26 -
 .../OOZIE/package/templates/catalina.properties.j2 |    81 -
 .../package/templates/oozie-log4j.properties.j2    |    92 -
 .../0.8/services/PIG/configuration/pig-env.xml     |    40 -
 .../0.8/services/PIG/configuration/pig-log4j.xml   |    63 -
 .../services/PIG/configuration/pig-properties.xml  |    93 -
 .../stacks/BIGTOP/0.8/services/PIG/metainfo.xml    |    85 -
 .../0.8/services/PIG/package/files/pigSmoke.sh     |    18 -
 .../0.8/services/PIG/package/scripts/params.py     |    57 -
 .../BIGTOP/0.8/services/PIG/package/scripts/pig.py |    59 -
 .../0.8/services/PIG/package/scripts/pig_client.py |    41 -
 .../services/PIG/package/scripts/service_check.py  |    69 -
 .../stacks/BIGTOP/0.8/services/WEBHCAT/alerts.json |    18 -
 .../WEBHCAT/package/files/alert_webhcat_server.py  |   230 -
 .../stacks/BIGTOP/0.8/services/YARN/alerts.json    |   383 -
 .../YARN/configuration-mapred/mapred-env.xml       |    76 -
 .../YARN/configuration-mapred/mapred-site.xml      |   355 -
 .../YARN/configuration/capacity-scheduler.xml      |   130 -
 .../0.8/services/YARN/configuration/yarn-env.xml   |   198 -
 .../0.8/services/YARN/configuration/yarn-log4j.xml |    72 -
 .../0.8/services/YARN/configuration/yarn-site.xml  |   435 -
 .../stacks/BIGTOP/0.8/services/YARN/metainfo.xml   |   280 -
 .../stacks/BIGTOP/0.8/services/YARN/metrics.json   |  5354 ----
 .../YARN/package/files/alert_nodemanager_health.py |   158 -
 .../package/files/validateYarnComponentStatus.py   |   170 -
 .../0.8/services/YARN/package/scripts/__init__.py  |    20 -
 .../package/scripts/application_timeline_server.py |    57 -
 .../services/YARN/package/scripts/historyserver.py |    53 -
 .../YARN/package/scripts/mapred_service_check.py   |    80 -
 .../YARN/package/scripts/mapreduce2_client.py      |    42 -
 .../services/YARN/package/scripts/nodemanager.py   |    59 -
 .../0.8/services/YARN/package/scripts/params.py    |   182 -
 .../YARN/package/scripts/resourcemanager.py        |   108 -
 .../0.8/services/YARN/package/scripts/service.py   |    75 -
 .../services/YARN/package/scripts/service_check.py |    68 -
 .../services/YARN/package/scripts/status_params.py |    36 -
 .../0.8/services/YARN/package/scripts/yarn.py      |   238 -
 .../services/YARN/package/scripts/yarn_client.py   |    42 -
 .../package/templates/container-executor.cfg.j2    |    40 -
 .../YARN/package/templates/exclude_hosts_list.j2   |    21 -
 .../YARN/package/templates/include_hosts_list.j2   |    21 -
 .../YARN/package/templates/mapreduce.conf.j2       |    35 -
 .../YARN/package/templates/taskcontroller.cfg.j2   |    38 -
 .../services/YARN/package/templates/yarn.conf.j2   |    35 -
 .../BIGTOP/0.8/services/ZOOKEEPER/alerts.json      |    58 -
 .../ZOOKEEPER/configuration/zookeeper-env.xml      |   100 -
 .../ZOOKEEPER/configuration/zookeeper-log4j.xml    |   102 -
 .../BIGTOP/0.8/services/ZOOKEEPER/metainfo.xml     |    95 -
 .../0.8/services/ZOOKEEPER/package/files/zkEnv.sh  |    96 -
 .../services/ZOOKEEPER/package/files/zkServer.sh   |   120 -
 .../services/ZOOKEEPER/package/files/zkService.sh  |    26 -
 .../services/ZOOKEEPER/package/files/zkSmoke.sh    |    78 -
 .../services/ZOOKEEPER/package/scripts/__init__.py |    20 -
 .../services/ZOOKEEPER/package/scripts/params.py   |    86 -
 .../ZOOKEEPER/package/scripts/service_check.py     |    46 -
 .../ZOOKEEPER/package/scripts/status_params.py     |    26 -
 .../ZOOKEEPER/package/scripts/zookeeper.py         |   110 -
 .../ZOOKEEPER/package/scripts/zookeeper_client.py  |    42 -
 .../ZOOKEEPER/package/scripts/zookeeper_server.py  |    54 -
 .../ZOOKEEPER/package/scripts/zookeeper_service.py |    42 -
 .../package/templates/configuration.xsl.j2         |    42 -
 .../ZOOKEEPER/package/templates/zoo.cfg.j2         |    69 -
 .../templates/zookeeper_client_jaas.conf.j2        |    23 -
 .../package/templates/zookeeper_jaas.conf.j2       |    26 -
 .../stacks/BIGTOP/0.8/services/stack_advisor.py    |   534 -
 .../stacks/HDP/2.0.6.GlusterFS/metainfo.xml        |    23 -
 .../stacks/HDP/2.0.6.GlusterFS/repos/repoinfo.xml  |    89 -
 .../HDP/2.0.6.GlusterFS/role_command_order.json    |    73 -
 .../services/GLUSTERFS/configuration/core-site.xml |    49 -
 .../GLUSTERFS/configuration/hadoop-env.xml         |   211 -
 .../services/GLUSTERFS/metainfo.xml                |    64 -
 .../GLUSTERFS/package/scripts/glusterfs.py         |    29 -
 .../GLUSTERFS/package/scripts/glusterfs_client.py  |    34 -
 .../services/GLUSTERFS/package/scripts/params.py   |    29 -
 .../GLUSTERFS/package/scripts/service_check.py     |    37 -
 .../package/templates/glusterfs-env.sh.j2          |    18 -
 .../package/templates/glusterfs.properties.j2      |    36 -
 .../services/HBASE/configuration/hbase-site.xml    |   400 -
 .../2.0.6.GlusterFS/services/HBASE/metainfo.xml    |    34 -
 .../services/HDFS/configuration/core-site.xml      |   162 -
 .../services/HDFS/configuration/global.xml         |   227 -
 .../services/HDFS/configuration/hadoop-policy.xml  |   130 -
 .../services/HDFS/configuration/hdfs-site.xml      |   501 -
 .../HDP/2.0.6.GlusterFS/services/HDFS/metainfo.xml |    27 -
 .../services/HIVE/configuration/hive-site.xml      |   504 -
 .../services/HIVE/etc/hive-schema-0.13.0.mysql.sql |   889 -
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql         |   835 -
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql       |  1538 --
 .../HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql   |   165 -
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql    |    38 -
 .../HDP/2.0.6.GlusterFS/services/HIVE/metainfo.xml |   100 -
 .../services/OOZIE/configuration/oozie-site.xml    |   319 -
 .../2.0.6.GlusterFS/services/OOZIE/metainfo.xml    |    75 -
 .../services/PIG/configuration/pig-properties.xml  |    93 -
 .../HDP/2.0.6.GlusterFS/services/PIG/metainfo.xml  |    27 -
 .../2.0.6.GlusterFS/services/SQOOP/metainfo.xml    |    32 -
 .../YARN/configuration-mapred/core-site.xml.2      |    20 -
 .../YARN/configuration-mapred/mapred-site.xml      |    89 -
 .../YARN/configuration-mapred/mapred-site.xml.2    |    68 -
 .../YARN/configuration/capacity-scheduler.xml      |   130 -
 .../services/YARN/configuration/mapred-site.xml.2  |    68 -
 .../services/YARN/configuration/yarn-env.xml       |   194 -
 .../services/YARN/configuration/yarn-site.xml      |   420 -
 .../HDP/2.0.6.GlusterFS/services/YARN/metainfo.xml |   144 -
 .../HDP/2.0.6.GlusterFS/services/YARN/metrics.json |  2548 --
 .../package/files/validateYarnComponentStatus.py   |   170 -
 .../services/YARN/package/scripts/__init__.py      |    20 -
 .../package/scripts/application_timeline_server.py |    55 -
 .../services/YARN/package/scripts/historyserver.py |    53 -
 .../YARN/package/scripts/mapred_service_check.py   |    73 -
 .../YARN/package/scripts/mapreduce2_client.py      |    42 -
 .../services/YARN/package/scripts/nodemanager.py   |    59 -
 .../services/YARN/package/scripts/params.py        |   147 -
 .../YARN/package/scripts/resourcemanager.py        |    97 -
 .../services/YARN/package/scripts/service.py       |    62 -
 .../services/YARN/package/scripts/service_check.py |    67 -
 .../services/YARN/package/scripts/status_params.py |    35 -
 .../services/YARN/package/scripts/yarn.py          |   166 -
 .../services/YARN/package/scripts/yarn_client.py   |    42 -
 .../package/templates/container-executor.cfg.j2    |    22 -
 .../YARN/package/templates/exclude_hosts_list.j2   |    21 -
 .../YARN/package/templates/include_hosts_list.j2   |    21 -
 .../YARN/package/templates/mapreduce.conf.j2       |    17 -
 .../services/YARN/package/templates/yarn-env.sh.j2 |   128 -
 .../services/YARN/package/templates/yarn.conf.j2   |    17 -
 .../services/ZOOKEEPER/metainfo.xml                |    28 -
 .../stacks/HDP/2.0.6/configuration/cluster-env.xml |   376 -
 .../main/resources/stacks/HDP/2.0.6/metainfo.xml   |    22 -
 .../resources/stacks/HDP/2.0.6/repos/repoinfo.xml  |    47 -
 .../stacks/HDP/2.0.6/role_command_order.json       |    75 -
 .../2.0.6/services/AMBARI_INFRA_SOLR/metainfo.xml  |    26 -
 .../HDP/2.0.6/services/AMBARI_METRICS/metainfo.xml |    27 -
 .../stacks/HDP/2.0.6/services/FLUME/metainfo.xml   |    26 -
 .../stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml |    26 -
 .../stacks/HDP/2.0.6/services/HBASE/metainfo.xml   |    33 -
 .../services/HBASE/quicklinks/quicklinks.json      |   103 -
 .../stacks/HDP/2.0.6/services/HDFS/metainfo.xml    |    33 -
 .../2.0.6/services/HDFS/quicklinks/quicklinks.json |    80 -
 .../stacks/HDP/2.0.6/services/HIVE/metainfo.xml    |    26 -
 .../HDP/2.0.6/services/KERBEROS/metainfo.xml       |    26 -
 .../services/OOZIE/configuration/oozie-env.xml     |   102 -
 .../stacks/HDP/2.0.6/services/OOZIE/metainfo.xml   |    32 -
 .../services/OOZIE/quicklinks/quicklinks.json      |    46 -
 .../stacks/HDP/2.0.6/services/PIG/metainfo.xml     |    26 -
 .../stacks/HDP/2.0.6/services/SQOOP/metainfo.xml   |    26 -
 .../stacks/HDP/2.0.6/services/YARN/metainfo.xml    |    44 -
 .../YARN/quicklinks-mapred/quicklinks.json         |    80 -
 .../2.0.6/services/YARN/quicklinks/quicklinks.json |    80 -
 .../HDP/2.0.6/services/ZOOKEEPER/metainfo.xml      |    27 -
 .../stacks/HDP/2.0.6/services/stack_advisor.py     |   871 -
 .../src/main/resources/stacks/HDP/2.0/metainfo.xml |    25 -
 .../resources/stacks/HDP/2.0/repos/repoinfo.xml    |    48 -
 .../stacks/HDP/2.0/role_command_order.json         |    62 -
 .../stacks/HDP/2.0/services/HBASE/metainfo.xml     |    26 -
 .../stacks/HDP/2.0/services/HDFS/metainfo.xml      |    26 -
 .../stacks/HDP/2.0/services/HIVE/metainfo.xml      |    26 -
 .../stacks/HDP/2.0/services/OOZIE/metainfo.xml     |    26 -
 .../stacks/HDP/2.0/services/PIG/metainfo.xml       |    26 -
 .../stacks/HDP/2.0/services/SQOOP/metainfo.xml     |    26 -
 .../stacks/HDP/2.0/services/YARN/metainfo.xml      |    31 -
 .../stacks/HDP/2.0/services/ZOOKEEPER/metainfo.xml |    26 -
 .../blueprints/multinode-default.json              |   176 -
 .../blueprints/singlenode-default.json             |   128 -
 .../stacks/HDP/2.1.GlusterFS/metainfo.xml          |    23 -
 .../stacks/HDP/2.1.GlusterFS/repos/repoinfo.xml    |    63 -
 .../HDP/2.1.GlusterFS/role_command_order.json      |    74 -
 .../configuration/falcon-client.properties.xml     |    29 -
 .../services/FALCON/configuration/falcon-env.xml   |    74 -
 .../configuration/falcon-runtime.properties.xml    |    53 -
 .../configuration/falcon-startup.properties.xml    |   242 -
 .../services/FALCON/configuration/oozie-site.xml   |   187 -
 .../HDP/2.1.GlusterFS/services/FALCON/metainfo.xml |   104 -
 .../services/FALCON/package/scripts/falcon.py      |    86 -
 .../FALCON/package/scripts/falcon_client.py        |    38 -
 .../FALCON/package/scripts/falcon_server.py        |    61 -
 .../services/FALCON/package/scripts/params.py      |    71 -
 .../FALCON/package/scripts/service_check.py        |    40 -
 .../FALCON/package/scripts/status_params.py        |    24 -
 .../FALCON/package/templates/client.properties.j2  |    42 -
 .../FALCON/package/templates/falcon-env.sh.j2      |    73 -
 .../FALCON/package/templates/runtime.properties.j2 |    50 -
 .../FALCON/package/templates/startup.properties.j2 |    89 -
 .../HDP/2.1.GlusterFS/services/FLUME/metainfo.xml  |    31 -
 .../services/GLUSTERFS/configuration/core-site.xml |    39 -
 .../GLUSTERFS/configuration/hadoop-env.xml         |   237 -
 .../2.1.GlusterFS/services/GLUSTERFS/metainfo.xml  |    64 -
 .../GLUSTERFS/package/scripts/glusterfs.py         |    29 -
 .../GLUSTERFS/package/scripts/glusterfs_client.py  |    34 -
 .../services/GLUSTERFS/package/scripts/params.py   |    29 -
 .../GLUSTERFS/package/scripts/service_check.py     |    37 -
 .../package/templates/glusterfs-env.sh.j2          |    18 -
 .../package/templates/glusterfs.properties.j2      |    36 -
 .../services/HBASE/configuration/hbase-site.xml    |   404 -
 .../HDP/2.1.GlusterFS/services/HBASE/metainfo.xml  |    34 -
 .../HDP/2.1.GlusterFS/services/HDFS/metainfo.xml   |    27 -
 .../services/HIVE/configuration/hive-site.xml      |   504 -
 .../services/HIVE/etc/hive-schema-0.13.0.mysql.sql |   889 -
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql         |   835 -
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql       |  1538 --
 .../HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql   |   165 -
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql    |    38 -
 .../HDP/2.1.GlusterFS/services/HIVE/metainfo.xml   |   100 -
 .../services/OOZIE/configuration/oozie-site.xml    |   319 -
 .../HDP/2.1.GlusterFS/services/OOZIE/metainfo.xml  |    75 -
 .../services/PIG/configuration/pig-properties.xml  |    93 -
 .../HDP/2.1.GlusterFS/services/PIG/metainfo.xml    |    27 -
 .../HDP/2.1.GlusterFS/services/SQOOP/metainfo.xml  |    32 -
 .../services/STORM/configuration/storm-env.xml     |    44 -
 .../services/STORM/configuration/storm-site.xml    |   676 -
 .../HDP/2.1.GlusterFS/services/STORM/metainfo.xml  |   122 -
 .../HDP/2.1.GlusterFS/services/STORM/metrics.json  |  1076 -
 .../services/STORM/package/files/wordCount.jar     |   Bin 690588 -> 0 bytes
 .../services/STORM/package/scripts/drpc_server.py  |    58 -
 .../services/STORM/package/scripts/nimbus.py       |    57 -
 .../services/STORM/package/scripts/nimbus_prod.py  |    55 -
 .../services/STORM/package/scripts/params.py       |    58 -
 .../services/STORM/package/scripts/rest_api.py     |    58 -
 .../services/STORM/package/scripts/service.py      |    77 -
 .../STORM/package/scripts/service_check.py         |    44 -
 .../STORM/package/scripts/status_params.py         |    36 -
 .../services/STORM/package/scripts/storm.py        |    55 -
 .../services/STORM/package/scripts/supervisor.py   |    62 -
 .../STORM/package/scripts/supervisor_prod.py       |    57 -
 .../STORM/package/scripts/supervisord_service.py   |    32 -
 .../services/STORM/package/scripts/ui_server.py    |    58 -
 .../services/STORM/package/scripts/yaml_config.py  |    69 -
 .../STORM/package/templates/config.yaml.j2         |    75 -
 .../STORM/package/templates/storm-env.sh.j2        |    45 -
 .../STORM/package/templates/storm_jaas.conf.j2     |    27 -
 .../services/TEZ/configuration/tez-env.xml         |    30 -
 .../services/TEZ/configuration/tez-site.xml        |   273 -
 .../HDP/2.1.GlusterFS/services/TEZ/metainfo.xml    |    73 -
 .../services/TEZ/package/scripts/params.py         |    32 -
 .../services/TEZ/package/scripts/tez.py            |    55 -
 .../services/TEZ/package/scripts/tez_client.py     |    41 -
 .../services/TEZ/package/templates/tez-env.sh.j2   |    23 -
 .../YARN/configuration-mapred/core-site.xml.2      |    20 -
 .../YARN/configuration-mapred/mapred-site.xml      |    89 -
 .../YARN/configuration-mapred/mapred-site.xml.2    |    68 -
 .../YARN/configuration-mapred/ssl-client.xml       |    70 -
 .../YARN/configuration-mapred/ssl-server.xml       |    80 -
 .../YARN/configuration/capacity-scheduler.xml      |   130 -
 .../services/YARN/configuration/mapred-site.xml.2  |    68 -
 .../services/YARN/configuration/yarn-env.xml       |   194 -
 .../services/YARN/configuration/yarn-site.xml      |   420 -
 .../HDP/2.1.GlusterFS/services/YARN/metainfo.xml   |   143 -
 .../HDP/2.1.GlusterFS/services/YARN/metrics.json   |  2548 --
 .../package/files/validateYarnComponentStatus.py   |   170 -
 .../services/YARN/package/scripts/__init__.py      |    20 -
 .../package/scripts/application_timeline_server.py |    55 -
 .../services/YARN/package/scripts/historyserver.py |    53 -
 .../YARN/package/scripts/mapred_service_check.py   |    73 -
 .../YARN/package/scripts/mapreduce2_client.py      |    42 -
 .../services/YARN/package/scripts/nodemanager.py   |    59 -
 .../services/YARN/package/scripts/params.py        |   146 -
 .../YARN/package/scripts/resourcemanager.py        |    97 -
 .../services/YARN/package/scripts/service.py       |    62 -
 .../services/YARN/package/scripts/service_check.py |    67 -
 .../services/YARN/package/scripts/status_params.py |    35 -
 .../services/YARN/package/scripts/yarn.py          |   166 -
 .../services/YARN/package/scripts/yarn_client.py   |    42 -
 .../package/templates/container-executor.cfg.j2    |    22 -
 .../YARN/package/templates/exclude_hosts_list.j2   |     3 -
 .../YARN/package/templates/include_hosts_list.j2   |    21 -
 .../YARN/package/templates/mapreduce.conf.j2       |    17 -
 .../services/YARN/package/templates/yarn-env.sh.j2 |   128 -
 .../services/YARN/package/templates/yarn.conf.j2   |    17 -
 .../2.1.GlusterFS/services/ZOOKEEPER/metainfo.xml  |    28 -
 .../HDP/2.1/blueprints/multinode-default.json      |   171 -
 .../HDP/2.1/blueprints/singlenode-default.json     |   125 -
 .../src/main/resources/stacks/HDP/2.1/metainfo.xml |    25 -
 .../resources/stacks/HDP/2.1/repos/repoinfo.xml    |    62 -
 .../stacks/HDP/2.1/role_command_order.json         |    23 -
 .../stacks/HDP/2.1/services/FALCON/metainfo.xml    |    26 -
 .../stacks/HDP/2.1/services/FLUME/metainfo.xml     |    26 -
 .../stacks/HDP/2.1/services/HBASE/metainfo.xml     |    26 -
 .../2.1/services/HDFS/configuration/hdfs-site.xml  |    33 -
 .../stacks/HDP/2.1/services/HDFS/metainfo.xml      |    26 -
 .../2.1/services/HIVE/configuration/hive-site.xml  |   631 -
 .../services/HIVE/etc/hive-schema-0.13.0.mysql.sql |   889 -
 .../HIVE/etc/hive-schema-0.13.0.oracle.sql         |   835 -
 .../HIVE/etc/hive-schema-0.13.0.postgres.sql       |  1538 --
 .../HIVE/etc/upgrade-0.12.0-to-0.13.0.oracle.sql   |   165 -
 .../services/HIVE/etc/upgrade-0.13.0.oracle.sql    |    38 -
 .../stacks/HDP/2.1/services/HIVE/metainfo.xml      |   108 -
 .../stacks/HDP/2.1/services/KERBEROS/metainfo.xml  |    26 -
 .../services/OOZIE/configuration/oozie-site.xml    |   318 -
 .../stacks/HDP/2.1/services/OOZIE/metainfo.xml     |    72 -
 .../services/PIG/configuration/pig-properties.xml  |    96 -
 .../stacks/HDP/2.1/services/PIG/metainfo.xml       |    26 -
 .../stacks/HDP/2.1/services/SQOOP/metainfo.xml     |    26 -
 .../stacks/HDP/2.1/services/STORM/metainfo.xml     |    28 -
 .../stacks/HDP/2.1/services/TEZ/metainfo.xml       |    26 -
 .../2.1/services/YARN/configuration/yarn-env.xml   |   162 -
 .../2.1/services/YARN/configuration/yarn-site.xml  |   111 -
 .../stacks/HDP/2.1/services/YARN/metainfo.xml      |    87 -
 .../stacks/HDP/2.1/services/ZOOKEEPER/metainfo.xml |    26 -
 .../stacks/HDP/2.1/services/stack_advisor.py       |   308 -
 .../stacks/HDP/2.2/configuration/cluster-env.xml   |    23 -
 .../src/main/resources/stacks/HDP/2.2/metainfo.xml |    25 -
 .../resources/stacks/HDP/2.2/repos/repoinfo.xml    |    62 -
 .../stacks/HDP/2.2/role_command_order.json         |    40 -
 .../configuration/falcon-startup.properties.xml    |   140 -
 .../stacks/HDP/2.2/services/FALCON/metainfo.xml    |    53 -
 .../stacks/HDP/2.2/services/FLUME/metainfo.xml     |    48 -
 .../2.2/services/HBASE/configuration/hbase-env.xml |   199 -
 .../services/HBASE/configuration/hbase-site.xml    |   328 -
 .../ranger-hbase-plugin-properties.xml             |   277 -
 .../stacks/HDP/2.2/services/HBASE/metainfo.xml     |    62 -
 .../HDP/2.2/services/HBASE/themes/theme.json       |   411 -
 .../2.2/services/HDFS/configuration/core-site.xml  |    55 -
 .../2.2/services/HDFS/configuration/hadoop-env.xml |   191 -
 .../2.2/services/HDFS/configuration/hdfs-log4j.xml |   268 -
 .../2.2/services/HDFS/configuration/hdfs-site.xml  |    83 -
 .../ranger-hdfs-plugin-properties.xml              |   267 -
 .../stacks/HDP/2.2/services/HDFS/metainfo.xml      |   114 -
 .../stacks/HDP/2.2/services/HDFS/themes/theme.json |   179 -
 .../2.2/services/HIVE/configuration/hive-env.xml   |   192 -
 .../2.2/services/HIVE/configuration/hive-site.xml  |  1852 --
 .../HIVE/configuration/hiveserver2-site.xml        |    84 -
 .../ranger-hive-plugin-properties.xml              |   260 -
 .../services/HIVE/configuration/webhcat-site.xml   |   129 -
 .../stacks/HDP/2.2/services/HIVE/metainfo.xml      |   142 -
 .../stacks/HDP/2.2/services/HIVE/themes/theme.json |   543 -
 .../stacks/HDP/2.2/services/KAFKA/metainfo.xml     |    27 -
 .../stacks/HDP/2.2/services/KERBEROS/metainfo.xml  |    26 -
 .../ranger-knox-plugin-properties.xml              |    29 -
 .../stacks/HDP/2.2/services/KNOX/metainfo.xml      |    44 -
 .../stacks/HDP/2.2/services/LOGSEARCH/metainfo.xml |    27 -
 .../2.2/services/OOZIE/configuration/oozie-env.xml |   113 -
 .../services/OOZIE/configuration/oozie-site.xml    |   106 -
 .../stacks/HDP/2.2/services/OOZIE/metainfo.xml     |   100 -
 .../stacks/HDP/2.2/services/PIG/metainfo.xml       |    48 -
 .../stacks/HDP/2.2/services/RANGER/metainfo.xml    |    30 -
 .../stacks/HDP/2.2/services/SLIDER/metainfo.xml    |    50 -
 .../stacks/HDP/2.2/services/SPARK/metainfo.xml     |    30 -
 .../stacks/HDP/2.2/services/SQOOP/metainfo.xml     |    44 -
 .../services/STORM/configuration/storm-site.xml    |    59 -
 .../stacks/HDP/2.2/services/STORM/metainfo.xml     |    29 -
 .../2.2/services/TEZ/configuration/tez-site.xml    |   441 -
 .../stacks/HDP/2.2/services/TEZ/metainfo.xml       |    47 -
 .../YARN/configuration-mapred/mapred-env.xml       |    52 -
 .../YARN/configuration-mapred/mapred-site.xml      |   106 -
 .../YARN/configuration/capacity-scheduler.xml      |    57 -
 .../2.2/services/YARN/configuration/yarn-env.xml   |    44 -
 .../2.2/services/YARN/configuration/yarn-site.xml  |   597 -
 .../stacks/HDP/2.2/services/YARN/kerberos.json     |   224 -
 .../stacks/HDP/2.2/services/YARN/metainfo.xml      |   105 -
 .../HDP/2.2/services/YARN/themes-mapred/theme.json |   132 -
 .../stacks/HDP/2.2/services/YARN/themes/theme.json |   250 -
 .../stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml |    27 -
 .../stacks/HDP/2.2/services/stack_advisor.py       |  1720 --
 .../main/resources/stacks/HDP/2.3.ECS/metainfo.xml |    23 -
 .../stacks/HDP/2.3.ECS/repos/repoinfo.xml          |   140 -
 .../stacks/HDP/2.3.ECS/role_command_order.json     |    10 -
 .../HDP/2.3.ECS/services/ACCUMULO/metainfo.xml     |    28 -
 .../stacks/HDP/2.3.ECS/services/ATLAS/metainfo.xml |    28 -
 .../services/ECS/configuration/core-site.xml       |   122 -
 .../services/ECS/configuration/hadoop-env.xml      |   148 -
 .../services/ECS/configuration/hdfs-site.xml       |    33 -
 .../stacks/HDP/2.3.ECS/services/ECS/kerberos.json  |    54 -
 .../stacks/HDP/2.3.ECS/services/ECS/metainfo.xml   |    84 -
 .../services/ECS/package/scripts/ecs_client.py     |   112 -
 .../2.3.ECS/services/ECS/package/scripts/params.py |    86 -
 .../services/ECS/package/scripts/service_check.py  |    47 -
 .../HDP/2.3.ECS/services/FALCON/metainfo.xml       |    27 -
 .../stacks/HDP/2.3.ECS/services/FLUME/metainfo.xml |    27 -
 .../services/HBASE/configuration/hbase-env.xml     |   110 -
 .../services/HBASE/configuration/hbase-site.xml    |    29 -
 .../HDP/2.3.ECS/services/HBASE/kerberos.json       |   135 -
 .../stacks/HDP/2.3.ECS/services/HBASE/metainfo.xml |    58 -
 .../stacks/HDP/2.3.ECS/services/HDFS/metainfo.xml  |    27 -
 .../stacks/HDP/2.3.ECS/services/HIVE/metainfo.xml  |    91 -
 .../stacks/HDP/2.3.ECS/services/KAFKA/metainfo.xml |    27 -
 .../HDP/2.3.ECS/services/KERBEROS/metainfo.xml     |    26 -
 .../stacks/HDP/2.3.ECS/services/KNOX/metainfo.xml  |    27 -
 .../HDP/2.3.ECS/services/MAHOUT/metainfo.xml       |    28 -
 .../stacks/HDP/2.3.ECS/services/OOZIE/metainfo.xml |    27 -
 .../HDP/2.3.ECS/services/RANGER/metainfo.xml       |    32 -
 .../HDP/2.3.ECS/services/RANGER_KMS/metainfo.xml   |    30 -
 .../HDP/2.3.ECS/services/SLIDER/metainfo.xml       |    27 -
 .../stacks/HDP/2.3.ECS/services/SPARK/metainfo.xml |    30 -
 .../stacks/HDP/2.3.ECS/services/SQOOP/metainfo.xml |    27 -
 .../stacks/HDP/2.3.ECS/services/STORM/metainfo.xml |    28 -
 .../services/TEZ/configuration/tez-site.xml        |    26 -
 .../stacks/HDP/2.3.ECS/services/TEZ/metainfo.xml   |    59 -
 .../YARN/configuration-mapred/mapred-site.xml      |    30 -
 .../services/YARN/configuration/yarn-site.xml      |    27 -
 .../stacks/HDP/2.3.ECS/services/YARN/kerberos.json |   228 -
 .../stacks/HDP/2.3.ECS/services/YARN/metainfo.xml  |   145 -
 .../HDP/2.3.ECS/services/ZOOKEEPER/metainfo.xml    |    51 -
 .../2.3.GlusterFS/configuration/cluster-env.xml    |    23 -
 .../configuration/cluster-env.xml.noversion        |    63 -
 .../configuration/cluster-env.xml.version          |    24 -
 .../stacks/HDP/2.3.GlusterFS/metainfo.xml          |    23 -
 .../stacks/HDP/2.3.GlusterFS/repos/repoinfo.xml    |    91 -
 .../HDP/2.3.GlusterFS/role_command_order.json      |     8 -
 .../ACCUMULO/configuration/accumulo-log4j.xml      |   114 -
 .../2.3.GlusterFS/services/ACCUMULO/kerberos.json  |   154 -
 .../2.3.GlusterFS/services/ACCUMULO/metainfo.xml   |    49 -
 .../HDP/2.3.GlusterFS/services/FALCON/metainfo.xml |    44 -
 .../HDP/2.3.GlusterFS/services/FLUME/metainfo.xml  |    50 -
 .../services/GLUSTERFS/configuration/core-site.xml |   194 -
 .../GLUSTERFS/configuration/hadoop-env.xml         |   237 -
 .../2.3.GlusterFS/services/GLUSTERFS/metainfo.xml  |    71 -
 .../GLUSTERFS/package/scripts/glusterfs.py         |    29 -
 .../GLUSTERFS/package/scripts/glusterfs_client.py  |    34 -
 .../services/GLUSTERFS/package/scripts/params.py   |    29 -
 .../GLUSTERFS/package/scripts/service_check.py     |    37 -
 .../package/templates/glusterfs-env.sh.j2          |    18 -
 .../package/templates/glusterfs.properties.j2      |    36 -
 .../services/HBASE/configuration/hbase-site.xml    |   412 -
 .../HDP/2.3.GlusterFS/services/HBASE/metainfo.xml  |    56 -
 .../HDP/2.3.GlusterFS/services/HDFS/metainfo.xml   |    90 -
 .../services/HIVE/configuration/hive-site.xml      |   462 -
 .../services/HIVE/configuration/webhcat-site.xml   |   132 -
 .../HDP/2.3.GlusterFS/services/HIVE/metainfo.xml   |    80 -
 .../HDP/2.3.GlusterFS/services/KAFKA/metainfo.xml  |    44 -
 .../2.3.GlusterFS/services/KERBEROS/metainfo.xml   |    25 -
 .../HDP/2.3.GlusterFS/services/KNOX/metainfo.xml   |    44 -
 .../HDP/2.3.GlusterFS/services/MAHOUT/metainfo.xml |    26 -
 .../services/OOZIE/configuration/oozie-site.xml    |   340 -
 .../HDP/2.3.GlusterFS/services/OOZIE/metainfo.xml  |    26 -
 .../HDP/2.3.GlusterFS/services/PIG/metainfo.xml    |    52 -
 .../HDP/2.3.GlusterFS/services/RANGER/metainfo.xml |    54 -
 .../HDP/2.3.GlusterFS/services/SLIDER/metainfo.xml |    56 -
 .../HDP/2.3.GlusterFS/services/SPARK/metainfo.xml  |    53 -
 .../HDP/2.3.GlusterFS/services/SQOOP/metainfo.xml  |    49 -
 .../HDP/2.3.GlusterFS/services/STORM/metainfo.xml  |    45 -
 .../services/TEZ/configuration/tez-site.xml        |    97 -
 .../HDP/2.3.GlusterFS/services/TEZ/kerberos.json   |    19 -
 .../HDP/2.3.GlusterFS/services/TEZ/metainfo.xml    |    46 -
 .../YARN/configuration-mapred/core-site.xml.2      |    20 -
 .../YARN/configuration-mapred/mapred-site.xml      |    89 -
 .../YARN/configuration-mapred/mapred-site.xml.2    |    68 -
 .../YARN/configuration/capacity-scheduler.xml      |    42 -
 .../services/YARN/configuration/yarn-site.xml      |   439 -
 .../HDP/2.3.GlusterFS/services/YARN/metainfo.xml   |   104 -
 .../2.3.GlusterFS/services/ZOOKEEPER/metainfo.xml  |    45 -
 .../HDP/2.3.GlusterFS/services/stack_advisor.py    |    21 -
 .../src/main/resources/stacks/HDP/2.3/metainfo.xml |    25 -
 .../resources/stacks/HDP/2.3/repos/repoinfo.xml    |   104 -
 .../stacks/HDP/2.3/role_command_order.json         |    26 -
 .../ACCUMULO/configuration/accumulo-env.xml        |    34 -
 .../ACCUMULO/configuration/accumulo-log4j.xml      |   115 -
 .../ACCUMULO/configuration/accumulo-site.xml       |    49 -
 .../stacks/HDP/2.3/services/ACCUMULO/kerberos.json |   121 -
 .../stacks/HDP/2.3/services/ACCUMULO/metainfo.xml  |    51 -
 .../services/ACCUMULO/quicklinks/quicklinks.json   |    40 -
 .../stacks/HDP/2.3/services/ACCUMULO/widgets.json  |   155 -
 .../stacks/HDP/2.3/services/ATLAS/metainfo.xml     |    51 -
 .../2.3/services/ATLAS/quicklinks/quicklinks.json  |    36 -
 .../services/FALCON/configuration/falcon-env.xml   |    29 -
 .../configuration/falcon-startup.properties.xml    |    29 -
 .../stacks/HDP/2.3/services/FALCON/metainfo.xml    |    26 -
 .../stacks/HDP/2.3/services/FLUME/metainfo.xml     |    26 -
 .../2.3/services/HBASE/configuration/hbase-env.xml |   111 -
 .../services/HBASE/configuration/hbase-site.xml    |    64 -
 .../HBASE/configuration/ranger-hbase-audit.xml     |   177 -
 .../ranger-hbase-plugin-properties.xml             |   152 -
 .../configuration/ranger-hbase-policymgr-ssl.xml   |    66 -
 .../HBASE/configuration/ranger-hbase-security.xml  |    74 -
 .../stacks/HDP/2.3/services/HBASE/metainfo.xml     |    60 -
 .../stacks/HDP/2.3/services/HBASE/metrics.json     |  9374 -------
 .../2.3/services/HBASE/quicklinks/quicklinks.json  |    97 -
 .../HDP/2.3/services/HBASE/themes/theme.json       |    33 -
 .../stacks/HDP/2.3/services/HBASE/widgets.json     |   510 -
 .../2.3/services/HDFS/configuration/hadoop-env.xml |   185 -
 .../2.3/services/HDFS/configuration/hdfs-site.xml  |    83 -
 .../HDFS/configuration/ranger-hdfs-audit.xml       |   177 -
 .../ranger-hdfs-plugin-properties.xml              |   147 -
 .../configuration/ranger-hdfs-policymgr-ssl.xml    |    66 -
 .../HDFS/configuration/ranger-hdfs-security.xml    |    70 -
 .../stacks/HDP/2.3/services/HDFS/metainfo.xml      |   146 -
 .../2.3/services/HDFS/quicklinks/quicklinks.json   |    76 -
 .../stacks/HDP/2.3/services/HDFS/widgets.json      |   649 -
 .../2.3/services/HIVE/configuration/hive-env.xml   |   115 -
 .../2.3/services/HIVE/configuration/hive-site.xml  |    57 -
 .../HIVE/configuration/ranger-hive-audit.xml       |   177 -
 .../ranger-hive-plugin-properties.xml              |   152 -
 .../configuration/ranger-hive-policymgr-ssl.xml    |    66 -
 .../HIVE/configuration/ranger-hive-security.xml    |    74 -
 .../services/HIVE/configuration/webhcat-site.xml   |    43 -
 .../stacks/HDP/2.3/services/HIVE/metainfo.xml      |   104 -
 .../configuration/ranger-kafka-policymgr-ssl.xml   |    34 -
 .../stacks/HDP/2.3/services/KAFKA/metainfo.xml     |    27 -
 .../stacks/HDP/2.3/services/KERBEROS/metainfo.xml  |    26 -
 .../KNOX/configuration/knoxsso-topology.xml        |    94 -
 .../KNOX/configuration/ranger-knox-audit.xml       |   177 -
 .../ranger-knox-plugin-properties.xml              |   147 -
 .../configuration/ranger-knox-policymgr-ssl.xml    |    66 -
 .../KNOX/configuration/ranger-knox-security.xml    |    64 -
 .../stacks/HDP/2.3/services/KNOX/metainfo.xml      |    26 -
 .../stacks/HDP/2.3/services/MAHOUT/metainfo.xml    |    27 -
 .../2.3/services/OOZIE/configuration/oozie-env.xml |   144 -
 .../stacks/HDP/2.3/services/OOZIE/metainfo.xml     |    38 -
 .../2.3/services/OOZIE/quicklinks/quicklinks.json  |    46 -
 .../HDP/2.3/services/OOZIE/themes/theme.json       |   116 -
 .../services/PIG/configuration/pig-properties.xml  |   639 -
 .../stacks/HDP/2.3/services/PIG/metainfo.xml       |    50 -
 .../RANGER/configuration/ranger-ugsync-site.xml    |    46 -
 .../stacks/HDP/2.3/services/RANGER/metainfo.xml    |    32 -
 .../configuration/ranger-kms-policymgr-ssl.xml     |    34 -
 .../HDP/2.3/services/RANGER_KMS/metainfo.xml       |    55 -
 .../RANGER_KMS/themes/theme_version_1.json         |   336 -
 .../stacks/HDP/2.3/services/SLIDER/metainfo.xml    |    26 -
 .../stacks/HDP/2.3/services/SPARK/metainfo.xml     |    30 -
 .../stacks/HDP/2.3/services/SQOOP/metainfo.xml     |    26 -
 .../configuration/ranger-storm-policymgr-ssl.xml   |    34 -
 .../STORM/configuration/ranger-storm-security.xml  |    28 -
 .../services/STORM/configuration/storm-site.xml    |    78 -
 .../stacks/HDP/2.3/services/STORM/metainfo.xml     |    28 -
 .../2.3/services/TEZ/configuration/tez-site.xml    |   123 -
 .../stacks/HDP/2.3/services/TEZ/kerberos.json      |    25 -
 .../stacks/HDP/2.3/services/TEZ/metainfo.xml       |    26 -
 .../stacks/HDP/2.3/services/YARN/YARN_widgets.json |   670 -
 .../YARN/configuration-mapred/mapred-site.xml      |    46 -
 .../YARN/configuration/capacity-scheduler.xml      |    24 -
 .../YARN/configuration/ranger-yarn-audit.xml       |   177 -
 .../ranger-yarn-plugin-properties.xml              |    82 -
 .../configuration/ranger-yarn-policymgr-ssl.xml    |    66 -
 .../YARN/configuration/ranger-yarn-security.xml    |    64 -
 .../2.3/services/YARN/configuration/yarn-env.xml   |   173 -
 .../2.3/services/YARN/configuration/yarn-log4j.xml |   124 -
 .../2.3/services/YARN/configuration/yarn-site.xml  |   143 -
 .../stacks/HDP/2.3/services/YARN/kerberos.json     |   233 -
 .../stacks/HDP/2.3/services/YARN/metainfo.xml      |    71 -
 .../YARN/quicklinks-mapred/quicklinks.json         |    76 -
 .../2.3/services/YARN/quicklinks/quicklinks.json   |    76 -
 .../stacks/HDP/2.3/services/ZOOKEEPER/metainfo.xml |    27 -
 .../stacks/HDP/2.3/services/stack_advisor.py       |  1288 -
 .../stacks/HDP/2.3/upgrades/config-upgrade.xml     |   766 -
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.3.xml    |  1090 -
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.4.xml    |  1316 -
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.5.xml    |  1465 --
 .../HDP/2.3/upgrades/nonrolling-upgrade-2.6.xml    |  1568 --
 .../stacks/HDP/2.3/upgrades/upgrade-2.3.xml        |   901 -
 .../stacks/HDP/2.3/upgrades/upgrade-2.4.xml        |  1012 -
 .../stacks/HDP/2.3/upgrades/upgrade-2.5.xml        |  1230 -
 .../stacks/HDP/2.3/upgrades/upgrade-2.6.xml        |  1266 -
 .../src/main/resources/stacks/HDP/2.4/metainfo.xml |    25 -
 .../resources/stacks/HDP/2.4/repos/repoinfo.xml    |   104 -
 .../stacks/HDP/2.4/role_command_order.json         |     8 -
 .../stacks/HDP/2.4/services/ACCUMULO/metainfo.xml  |    26 -
 .../stacks/HDP/2.4/services/ATLAS/metainfo.xml     |    26 -
 .../stacks/HDP/2.4/services/FALCON/metainfo.xml    |    26 -
 .../stacks/HDP/2.4/services/FLUME/metainfo.xml     |    26 -
 .../stacks/HDP/2.4/services/HBASE/metainfo.xml     |    26 -
 .../2.4/services/HDFS/configuration/hadoop-env.xml |   185 -
 .../stacks/HDP/2.4/services/HDFS/metainfo.xml      |    26 -
 .../stacks/HDP/2.4/services/HIVE/metainfo.xml      |    26 -
 .../stacks/HDP/2.4/services/KAFKA/metainfo.xml     |    26 -
 .../stacks/HDP/2.4/services/KERBEROS/metainfo.xml  |    25 -
 .../stacks/HDP/2.4/services/KNOX/metainfo.xml      |    26 -
 .../stacks/HDP/2.4/services/MAHOUT/metainfo.xml    |    26 -
 .../stacks/HDP/2.4/services/OOZIE/metainfo.xml     |    26 -
 .../stacks/HDP/2.4/services/PIG/metainfo.xml       |    26 -
 .../stacks/HDP/2.4/services/RANGER/metainfo.xml    |    29 -
 .../HDP/2.4/services/RANGER_KMS/metainfo.xml       |    29 -
 .../stacks/HDP/2.4/services/SLIDER/metainfo.xml    |    26 -
 .../SPARK/configuration/spark-defaults.xml         |    32 -
 .../SPARK/configuration/spark-thrift-sparkconf.xml |    32 -
 .../stacks/HDP/2.4/services/SPARK/metainfo.xml     |    30 -
 .../stacks/HDP/2.4/services/SQOOP/metainfo.xml     |    26 -
 .../stacks/HDP/2.4/services/STORM/metainfo.xml     |    27 -
 .../stacks/HDP/2.4/services/TEZ/metainfo.xml       |    26 -
 .../2.4/services/YARN/configuration/yarn-site.xml  |    33 -
 .../stacks/HDP/2.4/services/YARN/metainfo.xml      |    34 -
 .../stacks/HDP/2.4/services/ZOOKEEPER/metainfo.xml |    26 -
 .../stacks/HDP/2.4/services/stack_advisor.py       |    22 -
 .../stacks/HDP/2.4/upgrades/config-upgrade.xml     |   652 -
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.4.xml    |  1079 -
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.5.xml    |  1424 --
 .../HDP/2.4/upgrades/nonrolling-upgrade-2.6.xml    |  1521 --
 .../stacks/HDP/2.4/upgrades/upgrade-2.4.xml        |   878 -
 .../stacks/HDP/2.4/upgrades/upgrade-2.5.xml        |  1181 -
 .../stacks/HDP/2.4/upgrades/upgrade-2.6.xml        |  1229 -
 .../src/main/resources/stacks/HDP/2.5/metainfo.xml |    25 -
 .../resources/stacks/HDP/2.5/repos/repoinfo.xml    |   132 -
 .../stacks/HDP/2.5/role_command_order.json         |    29 -
 .../stacks/HDP/2.5/services/ACCUMULO/metainfo.xml  |    26 -
 .../ATLAS/configuration/application-properties.xml |    42 -
 .../2.5/services/ATLAS/configuration/atlas-env.xml |   161 -
 .../ATLAS/configuration/ranger-atlas-audit.xml     |   131 -
 .../ranger-atlas-plugin-properties.xml             |    82 -
 .../configuration/ranger-atlas-policymgr-ssl.xml   |    73 -
 .../ATLAS/configuration/ranger-atlas-security.xml  |    77 -
 .../stacks/HDP/2.5/services/ATLAS/metainfo.xml     |   102 -
 .../falcon-atlas-application.properties.xml        |    61 -
 .../configuration/falcon-startup.properties.xml    |    75 -
 .../stacks/HDP/2.5/services/FALCON/kerberos.json   |    82 -
 .../stacks/HDP/2.5/services/FALCON/metainfo.xml    |    57 -
 .../stacks/HDP/2.5/services/FLUME/metainfo.xml     |    26 -
 .../services/HBASE/configuration/hbase-site.xml    |    32 -
 .../HBASE/configuration/ranger-hbase-audit.xml     |    57 -
 .../stacks/HDP/2.5/services/HBASE/kerberos.json    |   165 -
 .../stacks/HDP/2.5/services/HBASE/metainfo.xml     |    26 -
 .../HDFS/configuration/ranger-hdfs-audit.xml       |    59 -
 .../ranger-hdfs-plugin-properties.xml              |    31 -
 .../stacks/HDP/2.5/services/HDFS/kerberos.json     |   253 -
 .../stacks/HDP/2.5/services/HDFS/metainfo.xml      |    32 -
 .../services/HIVE/configuration/beeline-log4j2.xml |    80 -
 .../hive-atlas-application.properties.xml          |    61 -
 .../2.5/services/HIVE/configuration/hive-env.xml   |   147 -
 .../HIVE/configuration/hive-exec-log4j.xml         |   114 -
 .../HIVE/configuration/hive-exec-log4j2.xml        |   101 -
 .../HIVE/configuration/hive-interactive-env.xml    |   298 -
 .../HIVE/configuration/hive-interactive-site.xml   |   725 -
 .../2.5/services/HIVE/configuration/hive-log4j.xml |   126 -
 .../services/HIVE/configuration/hive-log4j2.xml    |   141 -
 .../2.5/services/HIVE/configuration/hive-site.xml  |    32 -
 .../HIVE/configuration/hivemetastore-site.xml      |    46 -
 .../configuration/hiveserver2-interactive-site.xml |    55 -
 .../HIVE/configuration/hiveserver2-site.xml        |    46 -
 .../HIVE/configuration/llap-cli-log4j2.xml         |   148 -
 .../HIVE/configuration/llap-daemon-log4j.xml       |   215 -
 .../HIVE/configuration/ranger-hive-audit.xml       |    57 -
 .../HIVE/configuration/ranger-hive-security.xml    |    28 -
 .../HIVE/configuration/tez-interactive-site.xml    |   198 -
 .../services/HIVE/configuration/webhcat-log4j.xml  |    83 -
 .../stacks/HDP/2.5/services/HIVE/kerberos.json     |   161 -
 .../stacks/HDP/2.5/services/HIVE/metainfo.xml      |   238 -
 .../2.5/services/HIVE/quicklinks/quicklinks.json   |    62 -
 .../stacks/HDP/2.5/services/HIVE/themes/theme.json |   297 -
 .../configuration/ranger-kafka-policymgr-ssl.xml   |    34 -
 .../stacks/HDP/2.5/services/KAFKA/metainfo.xml     |    27 -
 .../stacks/HDP/2.5/services/KERBEROS/metainfo.xml  |    25 -
 .../KNOX/configuration/knoxsso-topology.xml        |   126 -
 .../KNOX/configuration/ranger-knox-audit.xml       |    58 -
 .../stacks/HDP/2.5/services/KNOX/kerberos.json     |    82 -
 .../stacks/HDP/2.5/services/KNOX/metainfo.xml      |    26 -
 .../stacks/HDP/2.5/services/MAHOUT/metainfo.xml    |    26 -
 .../services/OOZIE/configuration/oozie-site.xml    |    34 -
 .../stacks/HDP/2.5/services/OOZIE/metainfo.xml     |    36 -
 .../HDP/2.5/services/OOZIE/themes/theme.json       |   116 -
 .../stacks/HDP/2.5/services/PIG/metainfo.xml       |    26 -
 .../RANGER/configuration/ranger-tagsync-site.xml   |    46 -
 .../RANGER/configuration/ranger-ugsync-site.xml    |    52 -
 .../stacks/HDP/2.5/services/RANGER/metainfo.xml    |    30 -
 .../RANGER_KMS/configuration/dbks-site.xml         |   104 -
 .../services/RANGER_KMS/configuration/kms-env.xml  |    44 -
 .../RANGER_KMS/configuration/ranger-kms-audit.xml  |    85 -
 .../HDP/2.5/services/RANGER_KMS/kerberos.json      |    81 -
 .../HDP/2.5/services/RANGER_KMS/metainfo.xml       |    37 -
 .../RANGER_KMS/themes/theme_version_2.json         |   125 -
 .../stacks/HDP/2.5/services/SLIDER/metainfo.xml    |    26 -
 .../2.5/services/SPARK/configuration/livy-conf.xml |    65 -
 .../2.5/services/SPARK/configuration/livy-env.xml  |   106 -
 .../SPARK/configuration/livy-log4j-properties.xml  |    41 -
 .../SPARK/configuration/livy-spark-blacklist.xml   |    40 -
 .../stacks/HDP/2.5/services/SPARK/kerberos.json    |   126 -
 .../stacks/HDP/2.5/services/SPARK/metainfo.xml     |   117 -
 .../stacks/HDP/2.5/services/SPARK2/metainfo.xml    |    31 -
 .../sqoop-atlas-application.properties.xml         |    47 -
 .../services/SQOOP/configuration/sqoop-site.xml    |    30 -
 .../stacks/HDP/2.5/services/SQOOP/kerberos.json    |    20 -
 .../stacks/HDP/2.5/services/SQOOP/metainfo.xml     |    29 -
 .../configuration/ranger-storm-policymgr-ssl.xml   |    34 -
 .../STORM/configuration/ranger-storm-security.xml  |    28 -
 .../services/STORM/configuration/storm-site.xml    |    61 -
 .../stacks/HDP/2.5/services/STORM/metainfo.xml     |    31 -
 .../2.5/services/TEZ/configuration/tez-site.xml    |    41 -
 .../stacks/HDP/2.5/services/TEZ/metainfo.xml       |    26 -
 .../YARN/configuration/capacity-scheduler.xml      |    34 -
 .../YARN/configuration/ranger-yarn-audit.xml       |    58 -
 .../2.5/services/YARN/configuration/yarn-site.xml  |    70 -
 .../stacks/HDP/2.5/services/YARN/kerberos.json     |   279 -
 .../stacks/HDP/2.5/services/YARN/metainfo.xml      |    32 -
 .../ZEPPELIN/configuration/zeppelin-env.xml        |   114 -
 .../stacks/HDP/2.5/services/ZEPPELIN/kerberos.json |    52 -
 .../stacks/HDP/2.5/services/ZEPPELIN/metainfo.xml  |    46 -
 .../stacks/HDP/2.5/services/ZOOKEEPER/metainfo.xml |    26 -
 .../stacks/HDP/2.5/services/stack_advisor.py       |  2210 --
 .../stacks/HDP/2.5/upgrades/config-upgrade.xml     |   624 -
 .../stacks/HDP/2.5/upgrades/host-upgrade-2.5.xml   |   594 -
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.5.xml    |  1218 -
 .../HDP/2.5/upgrades/nonrolling-upgrade-2.6.xml    |  1631 --
 .../stacks/HDP/2.5/upgrades/upgrade-2.5.xml        |   981 -
 .../stacks/HDP/2.5/upgrades/upgrade-2.6.xml        |  1151 -
 .../stacks/HDP/2.6/kerberos_preconfigure.json      |    31 -
 .../src/main/resources/stacks/HDP/2.6/metainfo.xml |    23 -
 .../resources/stacks/HDP/2.6/repos/repoinfo.xml    |   146 -
 .../stacks/HDP/2.6/services/ACCUMULO/metainfo.xml  |    26 -
 .../ATLAS/configuration/application-properties.xml |   127 -
 .../2.6/services/ATLAS/configuration/atlas-env.xml |    53 -
 .../services/ATLAS/configuration/atlas-log4j.xml   |   156 -
 .../ATLAS/configuration/ranger-atlas-audit.xml     |    33 -
 .../ranger-atlas-plugin-properties.xml             |    91 -
 .../stacks/HDP/2.6/services/ATLAS/kerberos.json    |   103 -
 .../stacks/HDP/2.6/services/ATLAS/metainfo.xml     |    43 -
 .../2.6/services/ATLAS/themes/theme_version_2.json |   845 -
 .../stacks/HDP/2.6/services/DRUID/kerberos.json    |    75 -
 .../stacks/HDP/2.6/services/DRUID/metainfo.xml     |    27 -
 .../stacks/HDP/2.6/services/FALCON/metainfo.xml    |    52 -
 .../stacks/HDP/2.6/services/FLUME/metainfo.xml     |    26 -
 .../2.6/services/HBASE/configuration/hbase-env.xml |   124 -
 .../services/HBASE/configuration/hbase-site.xml    |    62 -
 .../HBASE/configuration/ranger-hbase-audit.xml     |    33 -
 .../ranger-hbase-plugin-properties.xml             |    88 -
 .../stacks/HDP/2.6/services/HBASE/metainfo.xml     |    26 -
 .../2.6/services/HDFS/configuration/core-site.xml  |    29 -
 .../2.6/services/HDFS/configuration/hadoop-env.xml |   186 -
 .../configuration/hadoop-metrics2.properties.xml   |   135 -
 .../HDFS/configuration/ranger-hdfs-audit.xml       |    33 -
 .../ranger-hdfs-plugin-properties.xml              |    88 -
 .../stacks/HDP/2.6/services/HDFS/kerberos.json     |   254 -
 .../stacks/HDP/2.6/services/HDFS/metainfo.xml      |    29 -
 .../2.6/services/HIVE/configuration/hive-env.xml   |   120 -
 .../HIVE/configuration/hive-interactive-env.xml    |   151 -
 .../HIVE/configuration/hive-interactive-site.xml   |   352 -
 .../2.6/services/HIVE/configuration/hive-site.xml  |    36 -
 .../HIVE/configuration/parquet-logging.xml         |   106 -
 .../HIVE/configuration/ranger-hive-audit.xml       |    33 -
 .../ranger-hive-plugin-properties.xml              |    89 -
 .../HIVE/configuration/ranger-hive-security.xml    |    33 -
 .../HIVE/configuration/tez-interactive-site.xml    |   148 -
 .../stacks/HDP/2.6/services/HIVE/metainfo.xml      |    32 -
 .../stacks/HDP/2.6/services/HIVE/themes/theme.json |    20 -
 .../KAFKA/configuration/ranger-kafka-audit.xml     |    33 -
 .../ranger-kafka-plugin-properties.xml             |    88 -
 .../stacks/HDP/2.6/services/KAFKA/metainfo.xml     |    26 -
 .../stacks/HDP/2.6/services/KERBEROS/metainfo.xml  |    25 -
 .../KNOX/configuration/ranger-knox-audit.xml       |    33 -
 .../ranger-knox-plugin-properties.xml              |    71 -
 .../2.6/services/KNOX/configuration/topology.xml   |   174 -
 .../stacks/HDP/2.6/services/KNOX/metainfo.xml      |    26 -
 .../stacks/HDP/2.6/services/MAHOUT/metainfo.xml    |    26 -
 .../stacks/HDP/2.6/services/OOZIE/kerberos.json    |    74 -
 .../stacks/HDP/2.6/services/OOZIE/metainfo.xml     |    66 -
 .../services/PIG/configuration/pig-properties.xml  |    93 -
 .../stacks/HDP/2.6/services/PIG/metainfo.xml       |    26 -
 .../RANGER/configuration/ranger-tagsync-site.xml   |    52 -
 .../RANGER/configuration/ranger-ugsync-site.xml    |    52 -
 .../stacks/HDP/2.6/services/RANGER/metainfo.xml    |    36 -
 .../RANGER_KMS/configuration/ranger-kms-audit.xml  |    33 -
 .../RANGER_KMS/configuration/ranger-kms-site.xml   |    68 -
 .../HDP/2.6/services/RANGER_KMS/metainfo.xml       |    29 -
 .../stacks/HDP/2.6/services/SLIDER/metainfo.xml    |    26 -
 .../2.6/services/SPARK/configuration/livy-conf.xml |   105 -
 .../2.6/services/SPARK/configuration/livy-env.xml  |   108 -
 .../SPARK/configuration/livy-spark-blacklist.xml   |    52 -
 .../SPARK/configuration/spark-thrift-sparkconf.xml |    40 -
 .../stacks/HDP/2.6/services/SPARK/kerberos.json    |   126 -
 .../stacks/HDP/2.6/services/SPARK/metainfo.xml     |    29 -
 .../services/SPARK2/configuration/livy2-conf.xml   |   114 -
 .../services/SPARK2/configuration/livy2-env.xml    |   109 -
 .../configuration/livy2-log4j-properties.xml       |    42 -
 .../SPARK2/configuration/livy2-spark-blacklist.xml |    52 -
 .../configuration/spark2-log4j-properties.xml      |    56 -
 .../configuration/spark2-thrift-sparkconf.xml      |    40 -
 .../stacks/HDP/2.6/services/SPARK2/kerberos.json   |   126 -
 .../stacks/HDP/2.6/services/SPARK2/metainfo.xml    |   123 -
 .../stacks/HDP/2.6/services/SQOOP/metainfo.xml     |    26 -
 .../STORM/configuration/ranger-storm-audit.xml     |    33 -
 .../ranger-storm-plugin-properties.xml             |    71 -
 .../services/STORM/configuration/storm-site.xml    |    61 -
 .../stacks/HDP/2.6/services/STORM/metainfo.xml     |    35 -
 .../stacks/HDP/2.6/services/SUPERSET/kerberos.json |    47 -
 .../stacks/HDP/2.6/services/SUPERSET/metainfo.xml  |    28 -
 .../HDP/2.6/services/TEZ/configuration/tez-env.xml |    56 -
 .../2.6/services/TEZ/configuration/tez-site.xml    |    42 -
 .../stacks/HDP/2.6/services/TEZ/metainfo.xml       |    26 -
 .../YARN/configuration-mapred/mapred-site.xml      |    37 -
 .../YARN/configuration/ranger-yarn-audit.xml       |    33 -
 .../ranger-yarn-plugin-properties.xml              |    88 -
 .../2.6/services/YARN/configuration/yarn-env.xml   |   195 -
 .../2.6/services/YARN/configuration/yarn-site.xml  |   138 -
 .../stacks/HDP/2.6/services/YARN/kerberos.json     |   297 -
 .../stacks/HDP/2.6/services/YARN/metainfo.xml      |    32 -
 .../ZEPPELIN/configuration/zeppelin-env.xml        |   195 -
 .../stacks/HDP/2.6/services/ZEPPELIN/kerberos.json |    52 -
 .../stacks/HDP/2.6/services/ZEPPELIN/metainfo.xml  |    46 -
 .../stacks/HDP/2.6/services/ZOOKEEPER/metainfo.xml |    26 -
 .../stacks/HDP/2.6/services/stack_advisor.py       |   798 -
 .../HDPWIN/2.1/configuration/cluster-env.xml       |   107 -
 .../HDPWIN/2.1/hooks/after-INSTALL/scripts/hook.py |    68 -
 .../2.1/hooks/after-INSTALL/scripts/params.py      |   102 -
 .../after-INSTALL/templates/Run-SmokeTests.cmd     |    19 -
 .../after-INSTALL/templates/Run-SmokeTests.ps1     |   624 -
 .../after-INSTALL/templates/cluster.properties.j2  |    38 -
 .../HDPWIN/2.1/hooks/before-ANY/scripts/hook.py    |    30 -
 .../HDPWIN/2.1/hooks/before-ANY/scripts/params.py  |    27 -
 .../2.1/hooks/before-ANY/scripts/setup_jdk.py      |    49 -
 .../2.1/hooks/before-INSTALL/scripts/hook.py       |    32 -
 .../2.1/hooks/before-INSTALL/scripts/params.py     |    22 -
 .../2.1/hooks/before-RESTART/scripts/hook.py       |    28 -
 .../HDPWIN/2.1/hooks/before-START/scripts/hook.py  |    42 -
 .../2.1/hooks/before-START/scripts/params.py       |    52 -
 .../templates/hadoop-metrics2-hbase.properties.j2  |    66 -
 .../templates/hadoop-metrics2.properties.j2        |    56 -
 .../main/resources/stacks/HDPWIN/2.1/metainfo.xml  |    22 -
 .../resources/stacks/HDPWIN/2.1/repos/repoinfo.xml |    27 -
 .../stacks/HDPWIN/2.1/role_command_order.json      |    94 -
 .../AMBARI_METRICS/configuration/ams-env.xml       |    75 -
 .../AMBARI_METRICS/configuration/ams-hbase-env.xml |    53 -
 .../configuration/ams-hbase-site.xml               |    48 -
 .../AMBARI_METRICS/configuration/ams-log4j.xml     |    60 -
 .../AMBARI_METRICS/configuration/ams-site.xml      |    34 -
 .../2.1/services/AMBARI_METRICS/metainfo.xml       |    26 -
 .../services/FALCON/configuration/falcon-env.xml   |    76 -
 .../stacks/HDPWIN/2.1/services/FALCON/metainfo.xml |    40 -
 .../services/FLUME/configuration/flume-conf.xml    |    38 -
 .../2.1/services/FLUME/configuration/flume-env.xml |    60 -
 .../stacks/HDPWIN/2.1/services/FLUME/metainfo.xml  |    26 -
 .../2.1/services/HBASE/configuration/hbase-env.xml |   123 -
 .../services/HBASE/configuration/hbase-site.xml    |    39 -
 .../stacks/HDPWIN/2.1/services/HBASE/metainfo.xml  |    39 -
 .../2.1/services/HDFS/configuration/core-site.xml  |    26 -
 .../2.1/services/HDFS/configuration/hadoop-env.xml |   161 -
 .../2.1/services/HDFS/configuration/hdfs-site.xml  |   102 -
 .../stacks/HDPWIN/2.1/services/HDFS/metainfo.xml   |    40 -
 .../2.1/services/HIVE/configuration/hcat-env.xml   |    34 -
 .../2.1/services/HIVE/configuration/hive-env.xml   |   128 -
 .../2.1/services/HIVE/configuration/hive-site.xml  |   328 -
 .../services/HIVE/configuration/webhcat-env.xml    |    34 -
 .../services/HIVE/configuration/webhcat-site.xml   |    65 -
 .../stacks/HDPWIN/2.1/services/HIVE/metainfo.xml   |    53 -
 .../2.1/services/OOZIE/configuration/oozie-env.xml |   147 -
 .../services/OOZIE/configuration/oozie-site.xml    |   112 -
 .../stacks/HDPWIN/2.1/services/OOZIE/metainfo.xml  |    39 -
 .../stacks/HDPWIN/2.1/services/PIG/metainfo.xml    |    40 -
 .../2.1/services/SQOOP/configuration/sqoop-env.xml |    75 -
 .../stacks/HDPWIN/2.1/services/SQOOP/metainfo.xml  |    39 -
 .../stacks/HDPWIN/2.1/services/STORM/alerts.json   |   147 -
 .../2.1/services/STORM/configuration/storm-env.xml |    49 -
 .../services/STORM/configuration/storm-site.xml    |    35 -
 .../stacks/HDPWIN/2.1/services/STORM/metainfo.xml  |    38 -
 .../2.1/services/TEZ/configuration/tez-env.xml     |    38 -
 .../2.1/services/TEZ/configuration/tez-site.xml    |    27 -
 .../stacks/HDPWIN/2.1/services/TEZ/metainfo.xml    |    39 -
 .../YARN/configuration-mapred/mapred-env.xml       |    61 -
 .../YARN/configuration-mapred/mapred-site.xml      |    20 -
 .../2.1/services/YARN/configuration/yarn-env.xml   |    61 -
 .../2.1/services/YARN/configuration/yarn-site.xml  |    84 -
 .../stacks/HDPWIN/2.1/services/YARN/metainfo.xml   |    60 -
 .../services/ZOOKEEPER/configuration/zoo.cfg.xml   |    30 -
 .../ZOOKEEPER/configuration/zookeeper-env.xml      |    89 -
 .../HDPWIN/2.1/services/ZOOKEEPER/metainfo.xml     |    40 -
 .../stacks/HDPWIN/2.1/services/stack_advisor.py    |   895 -
 .../main/resources/stacks/HDPWIN/2.2/metainfo.xml  |    23 -
 .../resources/stacks/HDPWIN/2.2/repos/repoinfo.xml |    27 -
 .../2.2/services/AMBARI_METRICS/metainfo.xml       |    26 -
 .../configuration/falcon-startup.properties.xml    |   130 -
 .../stacks/HDPWIN/2.2/services/FALCON/metainfo.xml |    26 -
 .../stacks/HDPWIN/2.2/services/FLUME/metainfo.xml  |    26 -
 .../2.2/services/HBASE/configuration/hbase-env.xml |    33 -
 .../services/HBASE/configuration/hbase-site.xml    |   187 -
 .../stacks/HDPWIN/2.2/services/HBASE/metainfo.xml  |    32 -
 .../HDPWIN/2.2/services/HBASE/themes/theme.json    |   367 -
 .../2.2/services/HDFS/configuration/core-site.xml  |    29 -
 .../2.2/services/HDFS/configuration/hadoop-env.xml |   130 -
 .../2.2/services/HDFS/configuration/hdfs-log4j.xml |   225 -
 .../2.2/services/HDFS/configuration/hdfs-site.xml  |    34 -
 .../stacks/HDPWIN/2.2/services/HDFS/metainfo.xml   |    32 -
 .../HDPWIN/2.2/services/HDFS/themes/theme.json     |   179 -
 .../2.2/services/HIVE/configuration/hive-env.xml   |   125 -
 .../2.2/services/HIVE/configuration/hive-site.xml  |  1822 --
 .../HIVE/configuration/hiveserver2-site.xml        |    72 -
 .../services/HIVE/configuration/webhcat-site.xml   |   104 -
 .../stacks/HDPWIN/2.2/services/HIVE/metainfo.xml   |    32 -
 .../HDPWIN/2.2/services/HIVE/themes/theme.json     |   393 -
 .../services/KNOX/configuration/gateway-site.xml   |    35 -
 .../2.2/services/KNOX/configuration/knox-env.xml   |    38 -
 .../ranger-knox-plugin-properties.xml              |    27 -
 .../stacks/HDPWIN/2.2/services/KNOX/metainfo.xml   |    26 -
 .../services/OOZIE/configuration/oozie-site.xml    |   106 -
 .../stacks/HDPWIN/2.2/services/OOZIE/metainfo.xml  |    26 -
 .../stacks/HDPWIN/2.2/services/PIG/metainfo.xml    |    26 -
 .../SLIDER/configurations/slider-client.xml        |    28 -
 .../stacks/HDPWIN/2.2/services/SLIDER/metainfo.xml |    26 -
 .../stacks/HDPWIN/2.2/services/SQOOP/metainfo.xml  |    26 -
 .../services/STORM/configuration/storm-site.xml    |    82 -
 .../stacks/HDPWIN/2.2/services/STORM/metainfo.xml  |    37 -
 .../2.2/services/TEZ/configuration/tez-site.xml    |   386 -
 .../stacks/HDPWIN/2.2/services/TEZ/metainfo.xml    |    26 -
 .../YARN/configuration-mapred/mapred-site.xml      |    96 -
 .../YARN/configuration/capacity-scheduler.xml      |    69 -
 .../2.2/services/YARN/configuration/yarn-env.xml   |    44 -
 .../2.2/services/YARN/configuration/yarn-site.xml  |   500 -
 .../stacks/HDPWIN/2.2/services/YARN/metainfo.xml   |    45 -
 .../2.2/services/YARN/themes-mapred/theme.json     |   132 -
 .../HDPWIN/2.2/services/YARN/themes/theme.json     |   250 -
 .../HDPWIN/2.2/services/ZOOKEEPER/metainfo.xml     |    26 -
 .../stacks/HDPWIN/2.2/services/stack_advisor.py    |  1154 -
 .../main/resources/stacks/HDPWIN/2.3/metainfo.xml  |    23 -
 .../resources/stacks/HDPWIN/2.3/repos/repoinfo.xml |    27 -
 .../configuration/falcon-startup.properties.xml    |    29 -
 .../stacks/HDPWIN/2.3/services/FALCON/metainfo.xml |    26 -
 .../stacks/HDPWIN/2.3/services/FLUME/metainfo.xml  |    26 -
 .../services/HBASE/configuration/hbase-site.xml    |    63 -
 .../stacks/HDPWIN/2.3/services/HBASE/metainfo.xml  |    33 -
 .../HDPWIN/2.3/services/HBASE/themes/theme.json    |    33 -
 .../2.3/services/HDFS/configuration/hdfs-site.xml  |    52 -
 .../stacks/HDPWIN/2.3/services/HDFS/metainfo.xml   |    26 -
 .../services/HIVE/configuration/webhcat-site.xml   |    29 -
 .../stacks/HDPWIN/2.3/services/HIVE/metainfo.xml   |    26 -
 .../stacks/HDPWIN/2.3/services/KNOX/metainfo.xml   |    26 -
 .../2.3/services/OOZIE/configuration/oozie-env.xml |   147 -
 .../services/OOZIE/configuration/oozie-site.xml    |   148 -
 .../stacks/HDPWIN/2.3/services/OOZIE/metainfo.xml  |    26 -
 .../stacks/HDPWIN/2.3/services/PIG/metainfo.xml    |    26 -
 .../stacks/HDPWIN/2.3/services/SLIDER/metainfo.xml |    26 -
 .../stacks/HDPWIN/2.3/services/SQOOP/metainfo.xml  |    26 -
 .../services/STORM/configuration/storm-site.xml    |    62 -
 .../stacks/HDPWIN/2.3/services/STORM/metainfo.xml  |    27 -
 .../2.3/services/TEZ/configuration/tez-site.xml    |   115 -
 .../stacks/HDPWIN/2.3/services/TEZ/metainfo.xml    |    26 -
 .../YARN/configuration/capacity-scheduler.xml      |    34 -
 .../stacks/HDPWIN/2.3/services/YARN/metainfo.xml   |    34 -
 .../HDPWIN/2.3/services/ZOOKEEPER/metainfo.xml     |    26 -
 .../stacks/HDPWIN/2.3/services/stack_advisor.py    |    60 -
 .../stacks/PERF/1.0/configuration/cluster-env.xml  |   138 -
 .../resources/stacks/PERF/1.0/hdp_urlinfo.json     |    12 -
 .../PERF/1.0/hooks/after-INSTALL/scripts/hook.py   |    28 -
 .../PERF/1.0/hooks/before-ANY/scripts/hook.py      |    37 -
 .../PERF/1.0/hooks/before-ANY/scripts/params.py    |    47 -
 .../before-ANY/scripts/shared_initialization.py    |   107 -
 .../hooks/before-INSTALL/scripts/conf-select.py    |    35 -
 .../hooks/before-INSTALL/scripts/distro-select.py  |   153 -
 .../PERF/1.0/hooks/before-INSTALL/scripts/hook.py  |    77 -
 .../1.0/hooks/before-INSTALL/scripts/params.py     |    23 -
 .../PERF/1.0/hooks/before-RESTART/scripts/hook.py  |    29 -
 .../PERF/1.0/hooks/before-START/scripts/hook.py    |    29 -
 .../main/resources/stacks/PERF/1.0/kerberos.json   |    79 -
 .../main/resources/stacks/PERF/1.0/metainfo.xml    |    22 -
 .../stacks/PERF/1.0/properties/stack_features.json |    21 -
 .../stacks/PERF/1.0/properties/stack_packages.json |   267 -
 .../stacks/PERF/1.0/properties/stack_tools.json    |    14 -
 .../resources/stacks/PERF/1.0/repos/repoinfo.xml   |   118 -
 .../stacks/PERF/1.0/role_command_order.json        |     8 -
 .../AMBARI_METRICS/configuration/ams-site.xml      |    36 -
 .../PERF/1.0/services/AMBARI_METRICS/metainfo.xml  |    46 -
 .../stacks/PERF/1.0/services/FAKEHBASE/alerts.json |    35 -
 .../FAKEHBASE/configuration/hbase-alert-config.xml |    80 -
 .../services/FAKEHBASE/configuration/hbase-env.xml |   293 -
 .../FAKEHBASE/configuration/hbase-log4j.xml        |   146 -
 .../FAKEHBASE/configuration/hbase-policy.xml       |    53 -
 .../FAKEHBASE/configuration/hbase-site.xml         |   558 -
 .../FAKEHBASE/configuration/ranger-hbase-audit.xml |   122 -
 .../configuration/ranger-hbase-policymgr-ssl.xml   |    66 -
 .../configuration/ranger-hbase-security.xml        |    68 -
 .../PERF/1.0/services/FAKEHBASE/kerberos.json      |   165 -
 .../PERF/1.0/services/FAKEHBASE/metainfo.xml       |   196 -
 .../PERF/1.0/services/FAKEHBASE/metrics.json       |  9374 -------
 .../package/alerts/hbase_master_process.py         |    59 -
 .../package/alerts/hbase_regionserver_process.py   |    59 -
 .../FAKEHBASE/package/scripts/hbase_client.py      |    38 -
 .../FAKEHBASE/package/scripts/hbase_master.py      |    45 -
 .../package/scripts/hbase_regionserver.py          |    45 -
 .../package/scripts/phoenix_queryserver.py         |    42 -
 .../FAKEHBASE/package/scripts/service_check.py     |    30 -
 .../package/templates/input.config-hbase.json.j2   |    79 -
 .../services/FAKEHBASE/quicklinks/quicklinks.json  |    97 -
 .../PERF/1.0/services/FAKEHBASE/themes/theme.json  |   411 -
 .../PERF/1.0/services/FAKEHBASE/widgets.json       |   510 -
 .../stacks/PERF/1.0/services/FAKEHDFS/alerts.json  |   120 -
 .../services/FAKEHDFS/configuration/core-site.xml  |   225 -
 .../services/FAKEHDFS/configuration/hadoop-env.xml |   420 -
 .../configuration/hadoop-metrics2.properties.xml   |   130 -
 .../FAKEHDFS/configuration/hadoop-policy.xml       |   130 -
 .../FAKEHDFS/configuration/hdfs-alert-config.xml   |    80 -
 .../services/FAKEHDFS/configuration/hdfs-log4j.xml |   225 -
 .../services/FAKEHDFS/configuration/hdfs-site.xml  |   635 -
 .../FAKEHDFS/configuration/ranger-hdfs-audit.xml   |   124 -
 .../ranger-hdfs-plugin-properties.xml              |    88 -
 .../configuration/ranger-hdfs-policymgr-ssl.xml    |    67 -
 .../configuration/ranger-hdfs-security.xml         |    65 -
 .../services/FAKEHDFS/configuration/ssl-client.xml |    70 -
 .../services/FAKEHDFS/configuration/ssl-server.xml |    80 -
 .../PERF/1.0/services/FAKEHDFS/kerberos.json       |   253 -
 .../stacks/PERF/1.0/services/FAKEHDFS/metainfo.xml |   264 -
 .../stacks/PERF/1.0/services/FAKEHDFS/metrics.json |  7905 ------
 .../package/alerts/alert_checkpoint_time.py        |    59 -
 .../alerts/alert_datanode_unmounted_data_dir.py    |    59 -
 .../package/alerts/alert_nfs_gateway_process.py    |    59 -
 .../package/alerts/alert_snamenode_process.py      |    59 -
 .../package/alerts/alert_upgrade_finalized.py      |    59 -
 .../services/FAKEHDFS/package/scripts/datanode.py  |    57 -
 .../FAKEHDFS/package/scripts/hdfs_client.py        |    38 -
 .../FAKEHDFS/package/scripts/journalnode.py        |    58 -
 .../services/FAKEHDFS/package/scripts/namenode.py  |    79 -
 .../FAKEHDFS/package/scripts/nfsgateway.py         |    42 -
 .../services/FAKEHDFS/package/scripts/params.py    |    33 -
 .../FAKEHDFS/package/scripts/service_check.py      |    30 -
 .../services/FAKEHDFS/package/scripts/snamenode.py |    42 -
 .../FAKEHDFS/package/scripts/zkfc_slave.py         |    43 -
 .../services/FAKEHDFS/quicklinks/quicklinks.json   |    76 -
 .../PERF/1.0/services/FAKEHDFS/themes/theme.json   |   179 -
 .../stacks/PERF/1.0/services/FAKEHDFS/widgets.json |   649 -
 .../PERF/1.0/services/FAKEYARN/YARN_metrics.json   |  3486 ---
 .../PERF/1.0/services/FAKEYARN/YARN_widgets.json   |   611 -
 .../stacks/PERF/1.0/services/FAKEYARN/alerts.json  |    77 -
 .../FAKEYARN/configuration-mapred/mapred-env.xml   |    50 -
 .../FAKEYARN/configuration-mapred/mapred-site.xml  |   134 -
 .../FAKEYARN/configuration/capacity-scheduler.xml  |    69 -
 .../FAKEYARN/configuration/ranger-yarn-audit.xml   |   121 -
 .../ranger-yarn-plugin-properties.xml              |    82 -
 .../configuration/ranger-yarn-policymgr-ssl.xml    |    66 -
 .../configuration/ranger-yarn-security.xml         |    58 -
 .../FAKEYARN/configuration/yarn-alert-config.xml   |    80 -
 .../services/FAKEYARN/configuration/yarn-env.xml   |   201 -
 .../services/FAKEYARN/configuration/yarn-log4j.xml |   103 -
 .../services/FAKEYARN/configuration/yarn-site.xml  |   784 -
 .../PERF/1.0/services/FAKEYARN/kerberos.json       |   290 -
 .../stacks/PERF/1.0/services/FAKEYARN/metainfo.xml |   355 -
 .../package/alerts/alert_history_process.py        |    59 -
 .../package/alerts/alert_nodemanager_health.py     |    59 -
 .../alerts/alert_resourcemanager_process.py        |    59 -
 .../package/alerts/alert_timeline_process.py       |    59 -
 .../package/scripts/application_timeline_server.py |    42 -
 .../FAKEYARN/package/scripts/historyserver.py      |    42 -
 .../package/scripts/mapred_service_check.py        |    30 -
 .../FAKEYARN/package/scripts/mapreduce2_client.py  |    38 -
 .../FAKEYARN/package/scripts/nodemanager.py        |    42 -
 .../FAKEYARN/package/scripts/resourcemanager.py    |    48 -
 .../FAKEYARN/package/scripts/service_check.py      |    30 -
 .../FAKEYARN/package/scripts/yarn_client.py        |    38 -
 .../FAKEYARN/quicklinks-mapred/quicklinks.json     |    76 -
 .../services/FAKEYARN/quicklinks/quicklinks.json   |    76 -
 .../1.0/services/FAKEYARN/themes-mapred/theme.json |   132 -
 .../PERF/1.0/services/FAKEYARN/themes/theme.json   |   250 -
 .../PERF/1.0/services/FAKEZOOKEEPER/alerts.json    |    20 -
 .../configuration/zk-alert-config.xml              |    80 -
 .../PERF/1.0/services/FAKEZOOKEEPER/kerberos.json  |    40 -
 .../PERF/1.0/services/FAKEZOOKEEPER/metainfo.xml   |    70 -
 .../package/alerts/alert_zk_server_process.py      |    59 -
 .../FAKEZOOKEEPER/package/scripts/service_check.py |    30 -
 .../package/scripts/zookeeper_client.py            |    38 -
 .../package/scripts/zookeeper_server.py            |    42 -
 .../services/GRUMPY/configuration/grumpy-site.xml  |    48 -
 .../stacks/PERF/1.0/services/GRUMPY/kerberos.json  |    82 -
 .../stacks/PERF/1.0/services/GRUMPY/metainfo.xml   |    58 -
 .../1.0/services/GRUMPY/package/scripts/dwarf.py   |    42 -
 .../GRUMPY/package/scripts/service_check.py        |    30 -
 .../PERF/1.0/services/GRUMPY/themes/theme.json     |    65 -
 .../stacks/PERF/1.0/services/HAPPY/alerts.json     |    20 -
 .../HAPPY/configuration/happy-alert-config.xml     |    80 -
 .../services/HAPPY/configuration/happy-site.xml    |    48 -
 .../stacks/PERF/1.0/services/HAPPY/kerberos.json   |    82 -
 .../stacks/PERF/1.0/services/HAPPY/metainfo.xml    |    63 -
 .../HAPPY/package/alerts/alert_happy_process.py    |    59 -
 .../1.0/services/HAPPY/package/scripts/dwarf.py    |    42 -
 .../HAPPY/package/scripts/service_check.py         |    30 -
 .../PERF/1.0/services/HAPPY/themes/theme.json      |    65 -
 .../KERBEROS/configuration/kerberos-env.xml        |   363 -
 .../services/KERBEROS/configuration/krb5-conf.xml  |    85 -
 .../PERF/1.0/services/KERBEROS/kerberos.json       |    18 -
 .../stacks/PERF/1.0/services/KERBEROS/metainfo.xml |    94 -
 .../KERBEROS/package/scripts/kerberos_client.py    |    56 -
 .../KERBEROS/package/scripts/kerberos_common.py    |   480 -
 .../services/KERBEROS/package/scripts/params.py    |   204 -
 .../KERBEROS/package/scripts/service_check.py      |    30 -
 .../KERBEROS/package/scripts/status_params.py      |    32 -
 .../1.0/services/KERBEROS/package/scripts/utils.py |   105 -
 .../1.0/services/KERBEROS/properties/krb5_conf.j2  |    63 -
 .../stacks/PERF/1.0/services/SLEEPY/alerts.json    |    20 -
 .../SLEEPY/configuration/sleepy-alert-config.xml   |    80 -
 .../services/SLEEPY/configuration/sleepy-site.xml  |    48 -
 .../stacks/PERF/1.0/services/SLEEPY/kerberos.json  |    82 -
 .../stacks/PERF/1.0/services/SLEEPY/metainfo.xml   |    63 -
 .../SLEEPY/package/alerts/alert_sleepy_process.py  |    59 -
 .../1.0/services/SLEEPY/package/scripts/dwarf.py   |    42 -
 .../SLEEPY/package/scripts/service_check.py        |    30 -
 .../PERF/1.0/services/SLEEPY/themes/theme.json     |    65 -
 .../stacks/PERF/1.0/services/SNOW/alerts.json      |    20 -
 .../SNOW/configuration/snow-alert-config.xml       |    80 -
 .../1.0/services/SNOW/configuration/snow-site.xml  |    48 -
 .../stacks/PERF/1.0/services/SNOW/kerberos.json    |    82 -
 .../stacks/PERF/1.0/services/SNOW/metainfo.xml     |    63 -
 .../SNOW/package/alerts/alert_snow_process.py      |    59 -
 .../services/SNOW/package/scripts/service_check.py |    30 -
 .../services/SNOW/package/scripts/snow_white.py    |    42 -
 .../PERF/1.0/services/SNOW/themes/theme.json       |    65 -
 .../stacks/PERF/1.0/services/stack_advisor.py      |    36 -
 .../stacks/PERF/1.0/upgrades/config-upgrade.xml    |    34 -
 .../PERF/1.0/upgrades/nonrolling-upgrade-2.0.xml   |   453 -
 .../stacks/PERF/1.0/upgrades/upgrade-2.0.xml       |   341 -
 .../src/main/resources/stacks/PERF/1.0/vdf.xml     |    73 -
 .../resources/stacks/PERF/2.0/hdp_urlinfo.json     |    12 -
 .../main/resources/stacks/PERF/2.0/metainfo.xml    |    23 -
 .../resources/stacks/PERF/2.0/repos/repoinfo.xml   |   118 -
 .../src/main/resources/stacks/PERF/2.0/vdf.xml     |    74 -
 .../main/resources/stacks/PERF/PythonExecutor.sed  |    19 -
 .../resources/stacks/PERF/install_packages.sed     |    38 -
 .../3.0/services/HDFS/configuration/core-site.xml  |    71 -
 .../3.0/services/HDFS/configuration/hdfs-site.xml  |   115 -
 .../stacks/PHD/3.3/services/HAWQ/metainfo.xml      |    26 -
 .../stacks/PHD/3.3/services/PXF/metainfo.xml       |    26 -
 .../server/api/services/AmbariMetaInfoTest.java    |    91 -
 .../api/services/KerberosServiceMetaInfoTest.java  |   311 -
 .../ambari/server/stack/StackManagerTest.java      |   219 +-
 .../KerberosDescriptorUpdateHelperTest.java        |    10 +
 .../state/stack/ConfigUpgradeValidityTest.java     |     8 +
 .../AMBARI_METRICS/test_service_advisor.py         |   599 -
 .../common-services/HAWQ/hawq_base_test_case.py    |   129 -
 .../HAWQ/test_alert_component_status.py            |   140 -
 .../HAWQ/test_alert_segment_registration_status.py |   169 -
 .../common-services/HAWQ/test_alert_sync_status.py |   193 -
 .../python/common-services/HAWQ/test_hawqmaster.py |   340 -
 .../common-services/HAWQ/test_hawqsegment.py       |   103 -
 .../common-services/HAWQ/test_hawqstandby.py       |   145 -
 .../common-services/HAWQ/test_service_advisor.py   |  1334 -
 .../test/python/common-services/HAWQ/test_utils.py |    28 -
 .../HIVE/test_jdbc_driver_config.py                |    51 -
 .../LOGSEARCH/test_service_advisor.py              |   202 -
 .../common-services/PXF/test_alerts_api_status.py  |    77 -
 .../test/python/common-services/PXF/test_pxf.py    |   160 -
 .../common-services/PXF/test_service_advisor.py    |   438 -
 .../RANGER/test_db_flavor_config.py                |    52 -
 .../RANGER_KMS/test_db_flavor_config.py            |    52 -
 .../SQOOP/test_jdbc_driver_config.py               |    51 -
 .../common-services/configs/hawq_default.json      |   254 -
 .../common-services/configs/hive_default.json      |   790 -
 .../configs/hive_unsupported_jdbc_type.json        |   780 -
 .../common-services/configs/hosts-1-host.json      |    93 -
 .../common-services/configs/hosts-3-hosts.json     |   269 -
 .../common-services/configs/pxf_default.json       |   222 -
 .../configs/ranger_admin_default.json              |   572 -
 .../ranger_admin_unsupported_db_flavor.json        |   386 -
 .../configs/ranger_kms_default.json                |   923 -
 .../configs/ranger_kms_unsupported_db_flavor.json  |   802 -
 .../configs/services-hawq-3-hosts.json             |   510 -
 .../configs/services-hawq-pxf-hdfs.json            |   312 -
 .../services-master_ambari_colo-3-hosts.json       |   511 -
 .../services-master_standby_colo-3-hosts.json      |   511 -
 .../configs/services-nohawq-3-hosts.json           |   160 -
 .../configs/services-normal-hawq-3-hosts.json      |   522 -
 .../configs/services-normal-nohawq-3-hosts.json    |   164 -
 .../services-standby_ambari_colo-3-hosts.json      |   511 -
 .../common-services/configs/sqoop_default.json     |  1010 -
 .../configs/sqoop_unsupported_jdbc_driver.json     |   879 -
 .../2.0.6/AMBARI_METRICS/test_metrics_collector.py |   391 -
 .../2.0.6/AMBARI_METRICS/test_metrics_grafana.py   |   113 -
 .../2.0.6/AMBARI_METRICS/test_metrics_monitor.py   |   142 -
 .../test/python/stacks/2.0.6/FLUME/test_flume.py   |   599 -
 .../stacks/2.0.6/FLUME/test_service_check.py       |    46 -
 .../stacks/2.0.6/GANGLIA/test_ganglia_monitor.py   |   290 -
 .../stacks/2.0.6/GANGLIA/test_ganglia_server.py    |   205 -
 .../python/stacks/2.0.6/HBASE/test_hbase_client.py |   250 -
 .../python/stacks/2.0.6/HBASE/test_hbase_master.py |   790 -
 .../stacks/2.0.6/HBASE/test_hbase_regionserver.py  |   592 -
 .../stacks/2.0.6/HBASE/test_hbase_service_check.py |   124 -
 .../stacks/2.0.6/HBASE/test_phoenix_queryserver.py |   450 -
 .../2.0.6/HDFS/test_alert_checkpoint_time.py       |    93 -
 .../HDFS/test_alert_datanode_unmounted_data_dir.py |   227 -
 .../2.0.6/HDFS/test_alert_metrics_deviation.py     |   276 -
 .../test/python/stacks/2.0.6/HDFS/test_datanode.py |   685 -
 .../python/stacks/2.0.6/HDFS/test_hdfs_client.py   |   129 -
 .../python/stacks/2.0.6/HDFS/test_journalnode.py   |   418 -
 .../test/python/stacks/2.0.6/HDFS/test_namenode.py |  1820 --
 .../python/stacks/2.0.6/HDFS/test_nfsgateway.py    |   301 -
 .../python/stacks/2.0.6/HDFS/test_service_check.py |   124 -
 .../python/stacks/2.0.6/HDFS/test_snamenode.py     |   287 -
 .../src/test/python/stacks/2.0.6/HDFS/test_zkfc.py |   399 -
 .../python/stacks/2.0.6/HIVE/test_hcat_client.py   |   129 -
 .../python/stacks/2.0.6/HIVE/test_hive_client.py   |   234 -
 .../stacks/2.0.6/HIVE/test_hive_metastore.py       |   575 -
 .../python/stacks/2.0.6/HIVE/test_hive_server.py   |   947 -
 .../stacks/2.0.6/HIVE/test_hive_service_check.py   |   348 -
 .../python/stacks/2.0.6/HIVE/test_mysql_server.py  |   197 -
 .../stacks/2.0.6/HIVE/test_webhcat_server.py       |   423 -
 .../python/stacks/2.0.6/OOZIE/test_oozie_client.py |   277 -
 .../python/stacks/2.0.6/OOZIE/test_oozie_server.py |  1761 --
 .../stacks/2.0.6/OOZIE/test_oozie_service_check.py |    74 -
 .../stacks/2.0.6/OOZIE/test_service_check.py       |   155 -
 .../python/stacks/2.0.6/PIG/test_pig_client.py     |   174 -
 .../stacks/2.0.6/PIG/test_pig_service_check.py     |   209 -
 .../stacks/2.0.6/SQOOP/test_service_check.py       |    62 -
 .../test/python/stacks/2.0.6/SQOOP/test_sqoop.py   |   146 -
 .../python/stacks/2.0.6/YARN/test_historyserver.py |   796 -
 .../stacks/2.0.6/YARN/test_mapreduce2_client.py    |   442 -
 .../2.0.6/YARN/test_mapreduce2_service_check.py    |   186 -
 .../python/stacks/2.0.6/YARN/test_nodemanager.py   |   669 -
 .../stacks/2.0.6/YARN/test_resourcemanager.py      |   715 -
 .../python/stacks/2.0.6/YARN/test_yarn_client.py   |   589 -
 .../stacks/2.0.6/YARN/test_yarn_service_check.py   |   102 -
 .../2.0.6/ZOOKEEPER/test_zookeeper_client.py       |   205 -
 .../2.0.6/ZOOKEEPER/test_zookeeper_server.py       |   319 -
 .../ZOOKEEPER/test_zookeeper_service_check.py      |    94 -
 .../stacks/2.0.6/common/test_stack_advisor.py      |  3410 ---
 .../stacks/2.0.6/configs/altfs_plus_hdfs.json      |   994 -
 .../stacks/2.0.6/configs/client-upgrade.json       |   872 -
 .../stacks/2.0.6/configs/default.hbasedecom.json   |   887 -
 .../2.0.6/configs/default.non_gmetad_host.json     |   900 -
 .../stacks/2.0.6/configs/default_ams_embedded.json |  1259 -
 .../stacks/2.0.6/configs/default_client.json       |  1009 -
 .../stacks/2.0.6/configs/default_hive_nn_ha.json   |   859 -
 .../stacks/2.0.6/configs/default_hive_nn_ha_2.json |   861 -
 .../2.0.6/configs/default_hive_non_hdfs.json       |  1304 -
 .../stacks/2.0.6/configs/default_no_install.json   |  1084 -
 .../stacks/2.0.6/configs/default_oozie_mysql.json  |   959 -
 .../configs/default_update_exclude_file_only.json  |   973 -
 .../stacks/2.0.6/configs/default_with_bucket.json  |  1331 -
 .../default_yarn_include_file_dont_manage.json     |  1385 -
 .../configs/default_yarn_include_file_manage.json  |  1385 -
 .../test/python/stacks/2.0.6/configs/flume_22.json |   942 -
 .../python/stacks/2.0.6/configs/flume_only.json    |   221 -
 .../python/stacks/2.0.6/configs/flume_target.json  |   866 -
 .../2.0.6/configs/ha_bootstrap_active_node.json    |   782 -
 .../2.0.6/configs/ha_bootstrap_standby_node.json   |   781 -
 .../ha_bootstrap_standby_node_initial_start.json   |   781 -
 ...tandby_node_initial_start_dfs_nameservices.json |   781 -
 .../python/stacks/2.0.6/configs/ha_default.json    |   784 -
 .../python/stacks/2.0.6/configs/ha_secured.json    |   802 -
 .../python/stacks/2.0.6/configs/hbase-2.2.json     |   881 -
 .../stacks/2.0.6/configs/hbase-check-2.2.json      |   878 -
 .../stacks/2.0.6/configs/hbase-preupgrade.json     |   372 -
 .../stacks/2.0.6/configs/hbase-rs-2.2-phoenix.json |   882 -
 .../python/stacks/2.0.6/configs/hbase-rs-2.2.json  |   881 -
 .../python/stacks/2.0.6/configs/hbase_no_phx.json  |   675 -
 .../stacks/2.0.6/configs/hbase_with_phx.json       |   687 -
 .../test/python/stacks/2.0.6/configs/nn_eu.json    |   508 -
 .../python/stacks/2.0.6/configs/nn_eu_standby.json |   436 -
 .../python/stacks/2.0.6/configs/nn_ru_lzo.json     |   378 -
 .../stacks/2.0.6/configs/oozie_existing_sqla.json  |   956 -
 .../2.0.6/configs/ranger-namenode-start.json       |   463 -
 .../2.0.6/configs/rebalancehdfs_default.json       |   556 -
 .../2.0.6/configs/rebalancehdfs_secured.json       |   558 -
 .../stacks/2.0.6/configs/secured_client.json       |  1032 -
 .../secured_yarn_include_file_dont_manage.json     |  1206 -
 .../configs/secured_yarn_include_file_manage.json  |  1206 -
 .../2.0.6/configs/structured-out-status-bad.json   |     1 -
 .../2.0.6/configs/structured-out-status.json       |     5 -
 .../stacks/2.0.6/configs/zk-service_check_2.2.json |   207 -
 .../python/stacks/2.1/FALCON/test_falcon_client.py |   134 -
 .../python/stacks/2.1/FALCON/test_falcon_server.py |   487 -
 .../python/stacks/2.1/FALCON/test_service_check.py |    62 -
 .../python/stacks/2.1/HIVE/test_hive_metastore.py  |   890 -
 .../python/stacks/2.1/STORM/test_service_check.py  |    53 -
 .../python/stacks/2.1/STORM/test_storm_base.py     |   175 -
 .../stacks/2.1/STORM/test_storm_drpc_server.py     |   172 -
 .../2.1/STORM/test_storm_jaas_configuration.py     |    98 -
 .../python/stacks/2.1/STORM/test_storm_nimbus.py   |   281 -
 .../stacks/2.1/STORM/test_storm_nimbus_prod.py     |   147 -
 .../2.1/STORM/test_storm_rest_api_service.py       |   139 -
 .../stacks/2.1/STORM/test_storm_supervisor.py      |   212 -
 .../stacks/2.1/STORM/test_storm_supervisor_prod.py |   189 -
 .../stacks/2.1/STORM/test_storm_ui_server.py       |   241 -
 .../python/stacks/2.1/TEZ/test_service_check.py    |   203 -
 .../test/python/stacks/2.1/TEZ/test_tez_client.py  |   135 -
 .../stacks/2.1/YARN/test_apptimelineserver.py      |   290 -
 .../src/test/python/stacks/2.1/common/hosts.json   |   563 -
 .../test/python/stacks/2.1/common/services.json    |   957 -
 .../python/stacks/2.1/common/test_stack_advisor.py |   594 -
 .../python/stacks/2.1/configs/client-upgrade.json  |   801 -
 .../stacks/2.1/configs/default-storm-start.json    |   559 -
 .../test/python/stacks/2.1/configs/default.json    |  1164 -
 .../stacks/2.1/configs/hive-metastore-upgrade.json |   657 -
 .../stacks/2.1/configs/secured-storm-start.json    |   555 -
 .../test/python/stacks/2.1/configs/secured.json    |  1141 -
 .../stacks/2.2/ACCUMULO/test_accumulo_client.py    |    74 -
 .../python/stacks/2.2/KAFKA/test_kafka_broker.py   |   166 -
 .../stacks/2.2/KERBEROS/test_kerberos_client.py    |   378 -
 .../test/python/stacks/2.2/KERBEROS/use_cases.py   |   260 -
 .../python/stacks/2.2/KNOX/test_knox_gateway.py    |   455 -
 .../stacks/2.2/PIG/test_pig_service_check.py       |   201 -
 .../python/stacks/2.2/RANGER/test_ranger_admin.py  |   227 -
 .../stacks/2.2/RANGER/test_ranger_usersync.py      |   213 -
 .../python/stacks/2.2/SLIDER/test_slider_client.py |   201 -
 .../stacks/2.2/SPARK/test_job_history_server.py    |   349 -
 .../python/stacks/2.2/SPARK/test_spark_client.py   |   198 -
 .../stacks/2.2/SPARK/test_spark_service_check.py   |    67 -
 .../src/test/python/stacks/2.2/common/1/hosts.json |  9743 -------
 .../test/python/stacks/2.2/common/1/services.json  |  1043 -
 .../src/test/python/stacks/2.2/common/2/hosts.json | 25453 -------------------
 .../test/python/stacks/2.2/common/2/services.json  |  1044 -
 .../python/stacks/2.2/common/test_conf_select.py   |   200 -
 .../python/stacks/2.2/common/test_stack_advisor.py |  4022 ---
 .../stacks/2.2/common/test_stack_advisor_perf.py   |    84 -
 .../test/python/stacks/2.2/configs/default.json    |   613 -
 .../2.2/configs/default_custom_path_config.json    |   528 -
 .../python/stacks/2.2/configs/falcon-upgrade.json  |   456 -
 .../python/stacks/2.2/configs/hive-upgrade.json    |   803 -
 .../configs/journalnode-upgrade-hdfs-secure.json   |  1395 -
 .../2.2/configs/journalnode-upgrade-jmx.json       |   889 -
 .../configs/journalnode-upgrade-namenode-jmx.json  |   997 -
 ...journalnode-upgrade-namenode-status-active.json |    10 -
 ...ournalnode-upgrade-namenode-status-standby.json |    10 -
 .../stacks/2.2/configs/journalnode-upgrade.json    |  1393 -
 .../python/stacks/2.2/configs/knox_upgrade.json    |   344 -
 .../python/stacks/2.2/configs/oozie-downgrade.json |   402 -
 .../python/stacks/2.2/configs/oozie-upgrade.json   |   443 -
 .../2.2/configs/pig-service-check-secure.json      |   783 -
 .../stacks/2.2/configs/ranger-admin-default.json   |   446 -
 .../stacks/2.2/configs/ranger-admin-secured.json   |   324 -
 .../stacks/2.2/configs/ranger-admin-upgrade.json   |  1142 -
 .../2.2/configs/ranger-usersync-upgrade.json       |  1142 -
 .../test/python/stacks/2.2/configs/secured.json    |   364 -
 .../2.2/configs/spark-job-history-server.json      |   155 -
 .../stacks/2.3/ATLAS/test_metadata_server.py       |   400 -
 .../python/stacks/2.3/ATLAS/test_service_check.py  |    63 -
 .../python/stacks/2.3/MAHOUT/test_mahout_client.py |   115 -
 .../stacks/2.3/MAHOUT/test_mahout_service_check.py |   128 -
 .../stacks/2.3/SPARK/test_spark_thrift_server.py   |   193 -
 .../python/stacks/2.3/STORM/test_service_check.py  |    51 -
 .../python/stacks/2.3/STORM/test_storm_base.py     |   127 -
 .../python/stacks/2.3/STORM/test_storm_upgrade.py  |    91 -
 .../test/python/stacks/2.3/YARN/test_ats_1_5.py    |   307 -
 .../stacks/2.3/common/services-sparkts-hive.json   | 10046 --------
 .../python/stacks/2.3/common/services-sparkts.json |  5853 -----
 .../python/stacks/2.3/common/sparkts-host.json     |   220 -
 .../python/stacks/2.3/common/test_stack_advisor.py |  2047 --
 .../test/python/stacks/2.3/configs/ats_1_5.json    |  1247 -
 .../stacks/2.3/configs/default.hbasedecom.json     |   717 -
 .../test/python/stacks/2.3/configs/default.json    |   492 -
 .../python/stacks/2.3/configs/hbase_default.json   |   533 -
 .../python/stacks/2.3/configs/hbase_secure.json    |   855 -
 .../src/test/python/stacks/2.3/configs/secure.json |   492 -
 .../python/stacks/2.3/configs/spark_default.json   |   629 -
 .../python/stacks/2.3/configs/storm_default.json   |   379 -
 .../stacks/2.3/configs/storm_default_secure.json   |   391 -
 .../stacks/2.4/AMBARI_INFRA/test_infra_solr.py     |   180 -
 .../python/stacks/2.4/LOGSEARCH/test_logfeeder.py  |   142 -
 .../python/stacks/2.4/LOGSEARCH/test_logsearch.py  |   192 -
 .../test/python/stacks/2.4/configs/default.json    |   601 -
 .../python/stacks/2.5/ATLAS/test_atlas_server.py   |   186 -
 .../test/python/stacks/2.5/HIVE/appComplete.json   |    10 -
 .../stacks/2.5/HIVE/appComplete_withMOTDmsg.txt    |    12 -
 .../test/python/stacks/2.5/HIVE/invalidApp.json    |     3 -
 .../stacks/2.5/HIVE/invalidApp_withMOTDmsg.txt     |     5 -
 .../python/stacks/2.5/HIVE/oneContainerDown.json   |    33 -
 .../python/stacks/2.5/HIVE/oneContainerDown1.json  |    33 -
 .../2.5/HIVE/oneContainerDown_withMOTDmsg.txt      |    35 -
 .../src/test/python/stacks/2.5/HIVE/running.json   |    41 -
 .../python/stacks/2.5/HIVE/running_withMOTDmsg.txt |    43 -
 .../HIVE/running_withMOTDmsg_andTrailingMsg.txt    |    46 -
 .../src/test/python/stacks/2.5/HIVE/starting.json  |    16 -
 .../stacks/2.5/HIVE/starting_withMOTDmsg.txt       |    18 -
 .../python/stacks/2.5/HIVE/test_hive_server_int.py |  1405 -
 .../python/stacks/2.5/RANGER/test_ranger_admin.py  |   525 -
 .../stacks/2.5/RANGER/test_ranger_tagsync.py       |   164 -
 .../stacks/2.5/RANGER/test_ranger_usersync.py      |   218 -
 .../stacks/2.5/RANGER_KMS/test_kms_server.py       |   968 -
 .../python/stacks/2.5/SPARK/test_spark_livy.py     |   152 -
 .../2.5/ZEPPELIN/interpreter_json_generated.py     |    27 -
 .../stacks/2.5/ZEPPELIN/test_zeppelin_060.py       |   344 -
 .../python/stacks/2.5/common/host-3-hosts.json     |   269 -
 .../2.5/common/services-normal-his-2-hosts.json    |  1251 -
 .../2.5/common/services-normal-his-valid.json      |  1187 -
 .../python/stacks/2.5/common/test_stack_advisor.py |  6282 -----
 .../test/python/stacks/2.5/configs/default.json    |   632 -
 .../python/stacks/2.5/configs/hsi_default.json     |  1379 -
 .../2.5/configs/hsi_default_for_restart.json       |  1384 -
 .../src/test/python/stacks/2.5/configs/hsi_ha.json |  1379 -
 .../stacks/2.5/configs/ranger-admin-default.json   |   823 -
 .../stacks/2.5/configs/ranger-admin-secured.json   |   890 -
 .../stacks/2.5/configs/ranger-kms-default.json     |   918 -
 .../stacks/2.5/configs/ranger-kms-secured.json     |   985 -
 .../test/python/stacks/2.5/configs/secured.json    |   551 -
 .../src/test/python/stacks/2.6/DRUID/test_druid.py |   740 -
 .../2.6/KAFKA/test_kafka_broker_other_sasl.py      |   115 -
 .../python/stacks/2.6/RANGER/test_ranger_admin.py  |   597 -
 .../stacks/2.6/RANGER/test_ranger_tagsync.py       |   296 -
 .../python/stacks/2.6/SPARK2/test_spark_livy2.py   |   154 -
 .../2.6/ZEPPELIN/interpreter_json_generated.py     |    27 -
 .../stacks/2.6/ZEPPELIN/test_zeppelin_070.py       |   513 -
 .../python/stacks/2.6/common/test_stack_advisor.py |  2614 --
 .../test/python/stacks/2.6/configs/default.json    |   734 -
 .../2.6/configs/default_kafka_plaintext.json       |   763 -
 .../2.6/configs/default_kafka_plaintextsasl.json   |   763 -
 .../2.6/configs/default_kafka_sasl_plaintext.json  |   763 -
 .../stacks/2.6/configs/default_kafka_sasl_ssl.json |   763 -
 .../stacks/2.6/configs/ranger-admin-default.json   |   853 -
 .../stacks/2.6/configs/ranger-admin-secured.json   |   919 -
 .../stacks/2.6/configs/secure_kafka_sasl_ssl.json  |   632 -
 .../python/stacks/{2.0.6 => }/configs/default.json |     0
 .../{2.0.6 => }/configs/repository_file.json       |     0
 .../python/stacks/{2.0.6 => }/configs/secured.json |     0
 .../{2.0.6 => }/configs/secured_no_jce_name.json   |     0
 .../python/stacks/configs}/stack_features.json     |     0
 .../python/stacks/configs}/stack_packages.json     |     0
 .../python/stacks/configs}/stack_tools.json        |     0
 .../after-INSTALL/test_after_install.py            |    17 +-
 .../before-ANY/test_before_any.py                  |     2 -
 .../before-INSTALL/test_before_install.py          |     6 +-
 .../before-SET_KEYTAB/test_before_set_keytab.py    |     2 -
 .../before-START/test_before_start.py              |    10 +-
 .../src/test/python/stacks/utils/RMFTestCase.py    |    22 +-
 ambari-server/src/test/python/unitTests.py         |     9 -
 2676 files changed, 44 insertions(+), 680914 deletions(-)

diff --git a/ambari-agent/src/test/python/ambari_agent/TestCheckWebUI.py b/ambari-agent/src/test/python/ambari_agent/TestCheckWebUI.py
deleted file mode 100644
index 0cbc90e..0000000
--- a/ambari-agent/src/test/python/ambari_agent/TestCheckWebUI.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import unittest
-import StringIO
-import sys
-
-from mock.mock import MagicMock, patch
-
-# Needed to import checkWebUI.py
-sys.path.append("../../../../ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files")
-import checkWebUI
-
-class TestMain(unittest.TestCase):
-
-  def setUp(self):
-    # disable stdout
-    out = StringIO.StringIO()
-    sys.stdout = out
-
-
-  def tearDown(self):
-    # enable stdout
-    sys.stdout = sys.__stdout__
-
-  @patch("optparse.OptionParser.parse_args")
-  @patch('httplib.HTTPConnection')
-  def test_check_web_ui(self, http_mock, parse_args_mock):
-      
-    #Positive scenario
-    options = MagicMock()
-    options.hosts = 'host1,host2'
-    options.port = '10000' 
-    parse_args_mock.return_value = (options, MagicMock)
-    http_conn = http_mock.return_value
-    http_conn.getresponse.return_value = MagicMock(status=200)
-
-    checkWebUI.main()
-
-    self.assertTrue(http_conn.request.called)
-    self.assertTrue(http_conn.getresponse.called)
-    self.assertTrue(http_conn.close.called)
-    
-    #Negative scenario
-    options = MagicMock()
-    options.hosts = 'host1,host2'
-    options.port = '10000'
-    parse_args_mock.return_value = (options, MagicMock)
-    http_conn.getresponse.return_value = MagicMock(status=404)
-
-    try:
-      checkWebUI.main()
-    except SystemExit, e:
-      self.assertEqual(e.code, 1)
-
-    self.assertTrue(http_conn.request.called)
-    self.assertTrue(http_conn.getresponse.called)
-    self.assertTrue(http_conn.close.called)
-
-if __name__ == "__main__":
-  unittest.main()
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/alerts.json b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/alerts.json
deleted file mode 100644
index fb2cf76..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/alerts.json
+++ /dev/null
@@ -1,139 +0,0 @@
-{
-  "ACCUMULO": {
-    "ACCUMULO_MASTER": [
-      {
-        "name": "accumulo_master_process",
-        "label": "Accumulo Master Process",
-        "description": "This alert is triggered if the Accumulo master process cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "PORT",
-          "uri": "{{accumulo-site/master.port.client}}",
-          "default_port": 9999,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],
-    "ACCUMULO_TSERVER": [
-      {
-        "name": "accumulo_tserver_process",
-        "label": "Accumulo TServer Process",
-        "description": "This host-level alert is triggered if the TServer processes cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "PORT",
-          "uri": "{{accumulo-site/tserver.port.client}}",
-          "default_port": 9997,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],
-    "ACCUMULO_GC": [
-      {
-        "name": "accumulo_gc_process",
-        "label": "Accumulo GC Process",
-        "description": "This host-level alert is triggered if the GC process cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "PORT",
-          "uri": "{{accumulo-site/gc.port.client}}",
-          "default_port": 50092,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],
-    "ACCUMULO_MONITOR": [
-      {
-        "name": "accumulo_monitor_process",
-        "label": "Accumulo Monitor Process",
-        "description": "This host-level alert is triggered if the Monitor process cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "PORT",
-          "uri": "{{accumulo-site/monitor.port.client}}",
-          "default_port": 50095,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ],
-    "ACCUMULO_TRACER": [
-      {
-        "name": "accumulo_tracer_process",
-        "label": "Accumulo Tracer Process",
-        "description": "This host-level alert is triggered if the Tracer process cannot be confirmed to be up and listening on the network for the configured critical threshold, given in seconds.",
-        "interval": 1,
-        "scope": "HOST",
-        "source": {
-          "type": "PORT",
-          "uri": "{{accumulo-site/trace.port.client}}",
-          "default_port": 12234,
-          "reporting": {
-            "ok": {
-              "text": "TCP OK - {0:.3f}s response on port {1}"
-            },
-            "warning": {
-              "text": "TCP OK - {0:.3f}s response on port {1}",
-              "value": 1.5
-            },
-            "critical": {
-              "text": "Connection failed: {0} to {1}:{2}",
-              "value": 5.0
-            }
-          }
-        }
-      }
-    ]
-  }
-}
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-env.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-env.xml
deleted file mode 100644
index af053b2..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-env.xml
+++ /dev/null
@@ -1,256 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property require-input="true">
-    <name>accumulo_root_password</name>
-    <value/>
-    <property-type>PASSWORD</property-type>
-    <display-name>Accumulo root password</display-name>
-    <description>Password for the Accumulo root user. This password will be
-      used to initialize Accumulo and to create the trace user. Changing this
-      will not change the password for the Accumulo root user.</description>
-    <value-attributes>
-      <type>password</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property require-input="true">
-    <name>instance_secret</name>
-    <value/>
-    <display-name>Instance Secret</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>A secret unique to a given instance that all Accumulo server
-      processes must know in order to communicate with one
-      another.</description>
-    <value-attributes>
-      <type>password</type>
-      <editable-only-at-install>true</editable-only-at-install>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property require-input="false">
-    <name>trace_password</name>
-    <value>trace</value>
-    <display-name>Trace user password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Password for the trace user. The trace user will be created
-      with this password if that user does not already exist. Changing this
-      will not change the password for the trace user.</description>
-    <value-attributes>
-      <type>password</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_log_dir</name>
-    <value>/var/log/accumulo</value>
-    <display-name>Accumulo Log Dir</display-name>
-    <description>Log Directories for Accumulo.</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_pid_dir</name>
-    <value>/var/run/accumulo</value>
-    <display-name>Accumulo PID Dir</display-name>
-    <description>Pid Directory for Accumulo.</description>
-    <value-attributes>
-      <type>directory</type>
-      <editable-only-at-install>true</editable-only-at-install>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_instance_name</name>
-    <value>accumulo</value>
-    <display-name>Instance Name</display-name>
-    <description>Accumulo Instance Name.</description>
-    <value-attributes>
-      <editable-only-at-install>true</editable-only-at-install>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_user</name>
-    <display-name>Accumulo User</display-name>
-    <value>accumulo</value>
-    <property-type>USER</property-type>
-    <description>User for running Accumulo server processes.</description>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-      <user-groups>
-        <property>
-          <type>cluster-env</type>
-          <name>user_group</name>
-        </property>
-      </user-groups>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_master_heapsize</name>
-    <value>1024</value>
-    <display-name>Accumulo Master Maximum Java heap size</display-name>
-    <description>Accumulo Master Heap Size.</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_tserver_heapsize</name>
-    <value>1536</value>
-    <display-name>Accumulo TServer Maximum Java heap size</display-name>
-    <description>Accumulo Tablet Server Heap Size.</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_monitor_heapsize</name>
-    <value>1024</value>
-    <display-name>Accumulo Monitor Maximum Java heap size</display-name>
-    <description>Accumulo Monitor Heap Size.</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_gc_heapsize</name>
-    <value>256</value>
-    <display-name>Accumulo GC Maximum Java heap size</display-name>
-    <description>Accumulo Garbage Collector Heap Size.</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_other_heapsize</name>
-    <value>1024</value>
-    <display-name>Accumulo Other Maximum Java heap size</display-name>
-    <description>Heap size for other Accumulo processes.</description>
-    <value-attributes>
-      <type>int</type>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_monitor_bind_all</name>
-    <value>false</value>
-    <display-name>Monitor Bind All Interfaces</display-name>
-    <description>Controls whether the monitor binds to all interfaces.</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_user_keytab</name>
-    <description>Accumulo keytab path</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>accumulo_principal_name</name>
-    <description>Accumulo principal name</description>
-    <property-type>KERBEROS_PRINCIPAL</property-type>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>accumulo-env template</display-name>
-    <description>The template for a client accumulo-env.sh file.</description>
-    <value>
-#! /usr/bin/env bash
-export HADOOP_PREFIX={{hadoop_prefix}}
-export HADOOP_CONF_DIR={{hadoop_conf_dir}}
-export JAVA_HOME={{java64_home}}
-export ZOOKEEPER_HOME={{zookeeper_home}}
-export ACCUMULO_LOG_DIR={{log_dir}}
-export ACCUMULO_CONF_DIR={{conf_dir}}
-export ACCUMULO_TSERVER_OPTS="-Xmx{{accumulo_tserver_heapsize}}m -Xms{{accumulo_tserver_heapsize}}m"
-export ACCUMULO_MASTER_OPTS="-Xmx{{accumulo_master_heapsize}}m -Xms{{accumulo_master_heapsize}}m"
-export ACCUMULO_MONITOR_OPTS="-Xmx{{accumulo_monitor_heapsize}}m -Xms{{accumulo_monitor_heapsize}}m"
-export ACCUMULO_GC_OPTS="-Xmx{{accumulo_gc_heapsize}}m -Xms{{accumulo_gc_heapsize}}m"
-export ACCUMULO_GENERAL_OPTS="-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -Djava.net.preferIPv4Stack=true ${ACCUMULO_GENERAL_OPTS}"
-export ACCUMULO_OTHER_OPTS="-Xmx{{accumulo_other_heapsize}}m -Xms{{accumulo_other_heapsize}}m ${ACCUMULO_OTHER_OPTS}"
-# what do when the JVM runs out of heap memory
-export ACCUMULO_KILL_CMD='kill -9 %p'
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>server_content</name>
-    <description>The template for a server accumulo-env.sh file.</description>
-    <display-name>Server accumulo-env template</display-name>
-    <value>
-#! /usr/bin/env bash
-export HADOOP_PREFIX={{hadoop_prefix}}
-export HADOOP_CONF_DIR={{hadoop_conf_dir}}
-export JAVA_HOME={{java64_home}}
-export ZOOKEEPER_HOME={{zookeeper_home}}
-export ACCUMULO_PID_DIR={{pid_dir}}
-export ACCUMULO_LOG_DIR={{log_dir}}
-export ACCUMULO_CONF_DIR={{server_conf_dir}}
-export ACCUMULO_TSERVER_OPTS="-Xmx{{accumulo_tserver_heapsize}}m -Xms{{accumulo_tserver_heapsize}}m"
-export ACCUMULO_MASTER_OPTS="-Xmx{{accumulo_master_heapsize}}m -Xms{{accumulo_master_heapsize}}m"
-export ACCUMULO_MONITOR_OPTS="-Xmx{{accumulo_monitor_heapsize}}m -Xms{{accumulo_monitor_heapsize}}m"
-export ACCUMULO_GC_OPTS="-Xmx{{accumulo_gc_heapsize}}m -Xms{{accumulo_gc_heapsize}}m"
-export ACCUMULO_GENERAL_OPTS="-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -Djava.net.preferIPv4Stack=true ${ACCUMULO_GENERAL_OPTS}"
-export ACCUMULO_OTHER_OPTS="-Xmx{{accumulo_other_heapsize}}m -Xms{{accumulo_other_heapsize}}m ${ACCUMULO_OTHER_OPTS}"
-{% if security_enabled %}
-export ACCUMULO_TSERVER_OPTS="${ACCUMULO_TSERVER_OPTS} -Djava.security.auth.login.config={{accumulo_jaas_file}} -Djavax.security.auth.useSubjectCredsOnly=false"
-export ACCUMULO_MASTER_OPTS="${ACCUMULO_MASTER_OPTS} -Djava.security.auth.login.config={{accumulo_jaas_file}} -Djavax.security.auth.useSubjectCredsOnly=false"
-export ACCUMULO_GC_OPTS="${ACCUMULO_GC_OPTS} -Djava.security.auth.login.config={{accumulo_jaas_file}} -Djavax.security.auth.useSubjectCredsOnly=false"
-{% endif %}
-export ACCUMULO_MONITOR_BIND_ALL={{monitor_bind_str}}
-# what do when the JVM runs out of heap memory
-export ACCUMULO_KILL_CMD='kill -9 %p'
-    </value>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-log4j.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-log4j.xml
deleted file mode 100644
index 4337cde..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-log4j.xml
+++ /dev/null
@@ -1,115 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>audit_log_level</name>
-    <value>OFF</value>
-    <description>Log level for audit logging</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>monitor_forwarding_log_level</name>
-    <value>WARN</value>
-    <description>Log level for logging forwarded to the Accumulo
-      Monitor</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>debug_log_size</name>
-    <value>512M</value>
-    <description>Size of each debug rolling log file</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>debug_num_logs</name>
-    <value>10</value>
-    <description>Number of rolling debug log files to keep</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>info_log_size</name>
-    <value>512M</value>
-    <description>Size of each info rolling log file</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>info_num_logs</name>
-    <value>10</value>
-    <description>Number of rolling info log files to keep</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>accumulo-log4j template</display-name>
-    <description>Custom log4j.properties</description>
-    <value>
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# default logging properties:
-#  by default, log everything at INFO or higher to the console
-log4j.rootLogger=INFO,A1
-
-# hide Jetty junk
-log4j.logger.org.mortbay.log=WARN,A1
-
-# hide "Got brand-new compressor" messages
-log4j.logger.org.apache.hadoop.io.compress=WARN,A1
-log4j.logger.org.apache.accumulo.core.file.rfile.bcfile.Compression=WARN,A1
-
-# hide junk from TestRandomDeletes
-log4j.logger.org.apache.accumulo.test.TestRandomDeletes=WARN,A1
-
-# hide junk from VFS
-log4j.logger.org.apache.commons.vfs2.impl.DefaultFileSystemManager=WARN,A1
-
-# hide almost everything from zookeeper
-log4j.logger.org.apache.zookeeper=ERROR,A1
-
-# hide AUDIT messages in the shell, alternatively you could send them to a different logger
-log4j.logger.org.apache.accumulo.core.util.shell.Shell.audit=WARN,A1
-
-# Send most things to the console
-log4j.appender.A1=org.apache.log4j.ConsoleAppender
-log4j.appender.A1.layout.ConversionPattern=%d{ISO8601} [%-8c{2}] %-5p: %m%n
-log4j.appender.A1.layout=org.apache.log4j.PatternLayout
-
-    </value>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-site.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-site.xml
deleted file mode 100644
index a7e7f92..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/accumulo-site.xml
+++ /dev/null
@@ -1,190 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<configuration>
-  <!-- Put your site-specific accumulo configurations here. The available configuration values along with their defaults are documented in docs/config.html Unless
-    you are simply testing at your workstation, you will most definitely need to change the three entries below. -->
-  <property>
-    <name>instance.volumes</name>
-    <value>hdfs://localhost:8020/apps/accumulo/data</value>
-    <description>Location of Accumulo data files in HDFS.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>instance.zookeeper.host</name>
-    <value>localhost:2181</value>
-    <description>Comma-separated list of ZooKeeper server:port
-      pairs.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>instance.zookeeper.timeout</name>
-    <value>30s</value>
-    <description>Timeout for zookeeper connections.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>tserver.memory.maps.max</name>
-    <value>1G</value>
-    <description>Amount of memory to use for Accumulo's in-memory
-      map, where incoming writes are sorted.  If native maps are enabled
-      (tserver.memory.maps.native.enabled) this memory is
-      off-heap. Ensure that table.compaction.minor.logs.threshold *
-      tserver.walog.max.size &gt;= tserver.memory.maps.max.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>tserver.memory.maps.native.enabled</name>
-    <value>true</value>
-    <description>Controls whether or not native maps are enabled
-      for Accumulo's in-memory map, where incoming writes are
-      sorted (see also tserver.memory.maps.max).</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>tserver.cache.data.size</name>
-    <value>128M</value>
-    <description>Size of on-heap block cache for data blocks.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>tserver.cache.index.size</name>
-    <value>256M</value>
-    <description>Size of on-heap block cache for index blocks.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>tserver.sort.buffer.size</name>
-    <value>200M</value>
-    <description>Amount of memory to use for sorting walogs when
-      recovering from tserver failure.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>tserver.walog.max.size</name>
-    <value>1G</value>
-    <description>Maximum size of each write-ahead log.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>master.port.client</name>
-    <value>9999</value>
-    <description>Port for client connections to the master.</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>trace.port.client</name>
-    <value>12234</value>
-    <description>Port for delivering tracing data to the tracer
-      process.</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>tserver.port.client</name>
-    <value>9997</value>
-    <description>Port for client connections to the tservers.</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>gc.port.client</name>
-    <value>50092</value>
-    <description>Port for monitoring the Accumulo garbage
-      collector.</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>monitor.port.client</name>
-    <value>50095</value>
-    <description>Port for the Accumulo monitor UI.</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>monitor.port.log4j</name>
-    <value>4560</value>
-    <description>Port for delivering logs for aggregation by the
-      Accumulo monitor.</description>
-    <value-attributes>
-      <type>int</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>trace.user</name>
-    <value>trace</value>
-    <display-name>Trace user</display-name>
-    <description>User that the tracer process uses to write
-      tracing data to Accumulo.</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>general.classpaths</name>
-    <value>
-$ACCUMULO_HOME/lib/accumulo-server.jar,
-$ACCUMULO_HOME/lib/accumulo-core.jar,
-$ACCUMULO_HOME/lib/accumulo-start.jar,
-$ACCUMULO_HOME/lib/accumulo-fate.jar,
-$ACCUMULO_HOME/lib/accumulo-proxy.jar,
-$ACCUMULO_HOME/lib/[^.].*.jar,
-$ZOOKEEPER_HOME/zookeeper[^.].*.jar,
-$HADOOP_CONF_DIR,
-$HADOOP_PREFIX/share/hadoop/common/[^.].*.jar,
-$HADOOP_PREFIX/share/hadoop/common/lib/(?!slf4j)[^.].*.jar,
-$HADOOP_PREFIX/share/hadoop/hdfs/[^.].*.jar,
-$HADOOP_PREFIX/share/hadoop/mapreduce/[^.].*.jar,
-$HADOOP_PREFIX/share/hadoop/yarn/[^.].*.jar,
-$HADOOP_PREFIX/share/hadoop/yarn/lib/jersey.*.jar
-    </value>
-    <description>List of jars and directories that will be added to
-      Accumulo's classpath.  Ordering matters, as the entries will be
-      searched in order when attempting to load a class.</description>
-    <value-attributes>
-      <type>content</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/client.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/client.xml
deleted file mode 100644
index 3ecd24c..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/configuration/client.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/kerberos.json b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/kerberos.json
deleted file mode 100644
index ed9c0fb..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/kerberos.json
+++ /dev/null
@@ -1,80 +0,0 @@
-{
-  "services": [
-    {
-      "name": "ACCUMULO",
-      "identities": [
-        {
-          "name": "accumulo",
-          "principal": {
-            "value": "${accumulo-env/accumulo_user}${principal_suffix}@${realm}",
-            "type" : "user",
-            "configuration": "accumulo-env/accumulo_principal_name",
-            "local_username": "${accumulo-env/accumulo_user}"
-          },
-          "keytab": {
-            "file": "${keytab_dir}/accumulo.headless.keytab",
-            "owner": {
-              "name": "${accumulo-env/accumulo_user}",
-              "access": "r"
-            },
-            "group": {
-              "name": "${cluster-env/user_group}",
-              "access": "r"
-            },
-            "configuration": "accumulo-env/accumulo_user_keytab"
-          }
-        },
-        {
-          "name": "accumulo_service",
-          "principal": {
-            "value": "${accumulo-env/accumulo_user}/_HOST@${realm}",
-            "type" : "service",
-            "configuration": "accumulo-site/general.kerberos.principal",
-            "local_username": "${accumulo-env/accumulo_user}"
-
-          },
-          "keytab": {
-            "file": "${keytab_dir}/accumulo.service.keytab",
-            "owner": {
-              "name": "${accumulo-env/accumulo_user}",
-              "access": "r"
-            },
-            "group": {
-              "name": "${cluster-env/user_group}",
-              "access": ""
-            },
-            "configuration": "accumulo-site/general.kerberos.keytab"
-          }
-        },
-        {
-          "name": "accumulo_smokeuser",
-          "reference": "/smokeuser"
-        }
-      ],
-      "components": [
-        {
-          "name": "ACCUMULO_MASTER",
-          "identities": [
-            {
-              "name": "accumulo_accumulo_master_hdfs",
-              "reference": "/HDFS/NAMENODE/hdfs"
-            }
-          ]
-        },
-        {
-          "name": "ACCUMULO_TSERVER"
-        },
-        {
-          "name": "ACCUMULO_MONITOR"
-        },
-        {
-          "name": "ACCUMULO_GC"
-        },
-        {
-          "name": "ACCUMULO_TRACER"
-        }
-      ]
-    }
-  ]
-}
-
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
deleted file mode 100644
index dd88832..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metainfo.xml
+++ /dev/null
@@ -1,220 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ACCUMULO</name>
-      <displayName>Accumulo</displayName>
-      <comment>Robust, scalable, high performance distributed key/value store.
-      </comment>
-      <version>1.6.1.2.2.0</version>
-      <components>
-        <component>
-          <name>ACCUMULO_MASTER</name>
-          <displayName>Accumulo Master</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <timelineAppid>ACCUMULO</timelineAppid>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>ACCUMULO/ACCUMULO_MASTER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/accumulo_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>accumulo_master</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>ACCUMULO_MONITOR</name>
-          <displayName>Accumulo Monitor</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <timelineAppid>ACCUMULO</timelineAppid>
-          <commandScript>
-            <script>scripts/accumulo_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>accumulo_monitor</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>ACCUMULO_GC</name>
-          <displayName>Accumulo GC</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <timelineAppid>ACCUMULO</timelineAppid>
-          <commandScript>
-            <script>scripts/accumulo_gc.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>accumulo_gc</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>ACCUMULO_TRACER</name>
-          <displayName>Accumulo Tracer</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <timelineAppid>ACCUMULO</timelineAppid>
-          <commandScript>
-            <script>scripts/accumulo_tracer.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>180</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>accumulo_tracer</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>ACCUMULO_TSERVER</name>
-          <displayName>Accumulo TServer</displayName>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <timelineAppid>ACCUMULO</timelineAppid>
-          <commandScript>
-            <script>scripts/accumulo_tserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>accumulo_tserver</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-        </component>
-
-        <component>
-          <name>ACCUMULO_CLIENT</name>
-          <displayName>Accumulo Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>true</versionAdvertised>
-          <timelineAppid>ACCUMULO</timelineAppid>
-          <commandScript>
-            <script>scripts/accumulo_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>accumulo-site.xml</fileName>
-              <dictionaryName>accumulo-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>accumulo-env.sh</fileName>
-              <dictionaryName>accumulo-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>accumulo-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>properties</type>
-              <fileName>client.conf</fileName>
-              <dictionaryName>client</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>accumulo</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HDFS</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>accumulo-env</config-type>
-        <config-type>accumulo-site</config-type>
-      </configuration-dependencies>
-
-      <themes>
-        <theme>
-          <fileName>credentials.json</fileName>
-          <default>true</default>
-        </theme>
-        <theme>
-          <fileName>directories.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-    </service>
-  </services>
-</metainfo>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metrics.json b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metrics.json
deleted file mode 100644
index fbf7295..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/metrics.json
+++ /dev/null
@@ -1,1377 +0,0 @@
-{
-  "ACCUMULO_MASTER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/master/replication/files_pending_repl_avg_time": {
-              "metric": "master.MasterReplication.FilesPendingReplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/files_pending_repl_num_ops": {
-              "metric": "master.MasterReplication.FilesPendingReplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/max_repl_threads_avg_time": {
-              "metric": "master.MasterReplication.MaxReplicationThreadsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/max_repl_threads_num_ops": {
-              "metric": "master.MasterReplication.MaxReplicationThreadsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/num_peers_avg_time": {
-              "metric": "master.MasterReplication.NumPeersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/num_peers_num_ops": {
-              "metric": "master.MasterReplication.NumPeersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/thrift/execute_avg_time": {
-              "metric": "thrift.Master.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/thrift/execute_num_ops": {
-              "metric": "thrift.Master.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/thrift/idle_avg_time": {
-              "metric": "thrift.Master.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/thrift/idle_num_ops": {
-              "metric": "thrift.Master.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/thrift/coordinator_execute_avg_time": {
-              "metric": "thrift.MasterReplicationCoordinator.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/thrift/coordinator_execute_num_ops": {
-              "metric": "thrift.MasterReplicationCoordinator.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/thrift/coordinator_idle_avg_time": {
-              "metric": "thrift.MasterReplicationCoordinator.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/thrift/coordinator_idle_num_ops": {
-              "metric": "thrift.MasterReplicationCoordinator.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/master/replication/files_pending_repl_avg_time": {
-              "metric": "master.MasterReplication.FilesPendingReplicationAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/files_pending_repl_num_ops": {
-              "metric": "master.MasterReplication.FilesPendingReplicationNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/max_repl_threads_avg_time": {
-              "metric": "master.MasterReplication.MaxReplicationThreadsAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/max_repl_threads_num_ops": {
-              "metric": "master.MasterReplication.MaxReplicationThreadsNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/num_peers_avg_time": {
-              "metric": "master.MasterReplication.NumPeersAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/num_peers_num_ops": {
-              "metric": "master.MasterReplication.NumPeersNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/thrift/execute_avg_time": {
-              "metric": "thrift.Master.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/thrift/execute_num_ops": {
-              "metric": "thrift.Master.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/thrift/idle_avg_time": {
-              "metric": "thrift.Master.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/thrift/idle_num_ops": {
-              "metric": "thrift.Master.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/thrift/coordinator_execute_avg_time": {
-              "metric": "thrift.MasterReplicationCoordinator.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/thrift/coordinator_execute_num_ops": {
-              "metric": "thrift.MasterReplicationCoordinator.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/thrift/coordinator_idle_avg_time": {
-              "metric": "thrift.MasterReplicationCoordinator.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/master/replication/thrift/coordinator_idle_num_ops": {
-              "metric": "thrift.MasterReplicationCoordinator.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ]
-  },
-  "ACCUMULO_GC": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/gc/thrift/execute_avg_time": {
-              "metric": "thrift.SimpleGarbageCollector.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/gc/thrift/execute_num_ops": {
-              "metric": "thrift.SimpleGarbageCollector.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/gc/thrift/idle_avg_time": {
-              "metric": "thrift.SimpleGarbageCollector.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/gc/thrift/idle_num_ops": {
-              "metric": "thrift.SimpleGarbageCollector.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/gc/thrift/execute_avg_time": {
-              "metric": "thrift.SimpleGarbageCollector.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/gc/thrift/execute_num_ops": {
-              "metric": "thrift.SimpleGarbageCollector.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/gc/thrift/idle_avg_time": {
-              "metric": "thrift.SimpleGarbageCollector.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/gc/thrift/idle_num_ops": {
-              "metric": "thrift.SimpleGarbageCollector.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ]
-  },
-  "ACCUMULO_TSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/tserver/general/active_majcs": {
-              "metric": "tserver.general.activeMajCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/active_mincs": {
-              "metric": "tserver.general.activeMinCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/entries": {
-              "metric": "tserver.general.entries",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/entries_in_mem": {
-              "metric": "tserver.general.entriesInMem",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/files_per_tablet": {
-              "metric": "tserver.general.filesPerTablet",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/hold_time": {
-              "metric": "tserver.general.holdTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/online_tablets": {
-              "metric": "tserver.general.onlineTablets",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/opening_tablets": {
-              "metric": "tserver.general.openingTablets",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/queries": {
-              "metric": "tserver.general.queries",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/queued_majcs": {
-              "metric": "tserver.general.queuedMajCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/queued_mincs": {
-              "metric": "tserver.general.queuedMinCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/total_mincs": {
-              "metric": "tserver.general.totalMinCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/unopened_tablets": {
-              "metric": "tserver.general.unopenedTablets",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_avg_count": {
-              "metric": "tserver.MinorCompactions.MincAvgCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_imax_count": {
-              "metric": "tserver.MinorCompactions.MincIMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_imin_count": {
-              "metric": "tserver.MinorCompactions.MincIMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_max_count": {
-              "metric": "tserver.MinorCompactions.MincMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_min_count": {
-              "metric": "tserver.MinorCompactions.MincMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_num_ops": {
-              "metric": "tserver.MinorCompactions.MincNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_stdev_count": {
-              "metric": "tserver.MinorCompactions.MincStdevCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_avg_count": {
-              "metric": "tserver.MinorCompactions.QueueAvgCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_imax_count": {
-              "metric": "tserver.MinorCompactions.QueueIMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_imin_count": {
-              "metric": "tserver.MinorCompactions.QueueIMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_max_count": {
-              "metric": "tserver.MinorCompactions.QueueMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_min_count": {
-              "metric": "tserver.MinorCompactions.QueueMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_num_ops": {
-              "metric": "tserver.MinorCompactions.QueueNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_stdev_count": {
-              "metric": "tserver.MinorCompactions.QueueStdevCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_avg_count": {
-              "metric": "tserver.Scans.ResultAvgCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_imax_count": {
-              "metric": "tserver.Scans.ResultIMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_imin_count": {
-              "metric": "tserver.Scans.ResultIMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_max_count": {
-              "metric": "tserver.Scans.ResultMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_min_count": {
-              "metric": "tserver.Scans.ResultMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_num_ops": {
-              "metric": "tserver.Scans.ResultNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_stdev_count": {
-              "metric": "tserver.Scans.ResultStdevCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_avg_count": {
-              "metric": "tserver.Scans.ScanAvgCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_imax_count": {
-              "metric": "tserver.Scans.ScanIMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_imin_count": {
-              "metric": "tserver.Scans.ScanIMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_max_count": {
-              "metric": "tserver.Scans.ScanMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_min_count": {
-              "metric": "tserver.Scans.ScanMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_num_ops": {
-              "metric": "tserver.Scans.ScanNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_stdev_count": {
-              "metric": "tserver.Scans.ScanStdevCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_avg_time": {
-              "metric": "tserver.Updates.CommitPrepAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_imax_time": {
-              "metric": "tserver.Updates.CommitPrepIMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_imin_time": {
-              "metric": "tserver.Updates.CommitPrepIMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_max_time": {
-              "metric": "tserver.Updates.CommitPrepMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_min_time": {
-              "metric": "tserver.Updates.CommitPrepMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_num_ops": {
-              "metric": "tserver.Updates.CommitPrepNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_stdev_time": {
-              "metric": "tserver.Updates.CommitPrepStdevTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_avg_time": {
-              "metric": "tserver.Updates.CommitTimeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_imax_time": {
-              "metric": "tserver.Updates.CommitTimeIMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_imin_time": {
-              "metric": "tserver.Updates.CommitTimeIMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_max_time": {
-              "metric": "tserver.Updates.CommitTimeMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_min_time": {
-              "metric": "tserver.Updates.CommitTimeMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_num_ops": {
-              "metric": "tserver.Updates.CommitTimeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_stdev_time": {
-              "metric": "tserver.Updates.CommitTimeStdevTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_avg_size": {
-              "metric": "tserver.Updates.MutationArraysSizeAvgSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_imax_size": {
-              "metric": "tserver.Updates.MutationArraysSizeIMaxSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_imin_size": {
-              "metric": "tserver.Updates.MutationArraysSizeIMinSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_max_size": {
-              "metric": "tserver.Updates.MutationArraysSizeMaxSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_min_size": {
-              "metric": "tserver.Updates.MutationArraysSizeMinSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_num_ops": {
-              "metric": "tserver.Updates.MutationArraysSizeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_stdev_size": {
-              "metric": "tserver.Updates.MutationArraysSizeStdevSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_avg_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_imax_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeIMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_imin_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeIMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_max_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_min_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_num_ops": {
-              "metric": "tserver.Updates.WaLogWriteTimeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_stdev_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeStdevTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/constraint_violations": {
-              "metric": "tserver.Updates.constraintViolations",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/permission_errors": {
-              "metric": "tserver.Updates.permissionErrors",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/unknown_tablet_errors": {
-              "metric": "tserver.Updates.unknownTabletErrors",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/thrift/execute_avg_time": {
-              "metric": "thrift.TabletServer.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/thrift/execute_num_ops": {
-              "metric": "thrift.TabletServer.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/thrift/idle_avg_time": {
-              "metric": "thrift.TabletServer.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/thrift/idle_num_ops": {
-              "metric": "thrift.TabletServer.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/replication/thrift/servicer_handler_execute_avg_time": {
-              "metric": "thrift.ReplicationServicerHandler.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/replication/thrift/servicer_handler_execute_num_ops": {
-              "metric": "thrift.ReplicationServicerHandler.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/replication/thrift/servicer_handler_idle_avg_time": {
-              "metric": "thrift.ReplicationServicerHandler.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/replication/thrift/servicer_handler_idle_num_ops": {
-              "metric": "thrift.ReplicationServicerHandler.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "default": {
-            "metrics/cpu/cpu_idle": {
-              "metric": "cpu_idle",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_nice": {
-              "metric": "cpu_nice",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_system": {
-              "metric": "cpu_system",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_user": {
-              "metric": "cpu_user",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/cpu/cpu_wio": {
-              "metric": "cpu_wio",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/disk_free": {
-              "metric": "disk_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/disk_total": {
-              "metric": "disk_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_fifteen": {
-              "metric": "load_fifteen",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_five": {
-              "metric": "load_five",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/load/load_one": {
-              "metric": "load_one",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_buffers": {
-              "metric": "mem_buffers",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_cached": {
-              "metric": "mem_cached",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_free": {
-              "metric": "mem_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_shared": {
-              "metric": "mem_shared",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/mem_total": {
-              "metric": "mem_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/swap_free": {
-              "metric": "swap_free",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/memory/swap_total": {
-              "metric": "swap_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/bytes_in": {
-              "metric": "bytes_in",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/bytes_out": {
-              "metric": "bytes_out",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/pkts_in": {
-              "metric": "pkts_in",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/network/pkts_out": {
-              "metric": "pkts_out",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/process/proc_run": {
-              "metric": "proc_run",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/process/proc_total": {
-              "metric": "proc_total",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_count": {
-              "metric": "read_count",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_count": {
-              "metric": "write_count",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_bytes": {
-              "metric": "read_bytes",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_bytes": {
-              "metric": "write_bytes",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/read_time": {
-              "metric": "read_time",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/disk/write_time": {
-              "metric": "write_time",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric": true
-            },
-            "metrics/jvm/memNonHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/disk/part_max_used": {
-              "metric": "part_max_used",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginSuccess_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memNonHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_num_ops": {
-              "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapCommittedM": {
-              "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsRunnable": {
-              "metric": "jvm.JvmMetrics.ThreadsRunnable",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsNew": {
-              "metric": "jvm.JvmMetrics.ThreadsNew",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcTimeMillis": {
-              "metric": "jvm.JvmMetrics.GcTimeMillis",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTerminated": {
-              "metric": "jvm.JvmMetrics.ThreadsTerminated",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/boottime": {
-              "metric": "boottime",
-              "pointInTime": true,
-              "temporal": true,
-              "amsHostMetric":true
-            },
-            "metrics/jvm/logError": {
-              "metric": "jvm.JvmMetrics.LogError",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsBlocked": {
-              "metric": "jvm.JvmMetrics.ThreadsBlocked",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_aidle": {
-              "metric": "cpu_aidle",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/cpu/cpu_speed": {
-              "metric": "cpu_speed",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/maxMemoryM": {
-              "metric": "jvm.metrics.maxMemoryM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logWarn": {
-              "metric": "jvm.JvmMetrics.LogWarn",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsTimedWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/gcCount": {
-              "metric": "jvm.JvmMetrics.GcCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/memHeapUsedM": {
-              "metric": "jvm.JvmMetrics.MemHeapUsedM",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/threadsWaiting": {
-              "metric": "jvm.JvmMetrics.ThreadsWaiting",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logInfo": {
-              "metric": "jvm.JvmMetrics.LogInfo",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/ugi/loginFailure_avg_time": {
-              "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/logFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountConcurrentMarkSweep": {
-              "metric": "jvm.JvmMetrics.GcCountConcurrentMarkSweep",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcCountParNew": {
-              "metric": "jvm.JvmMetrics.GcCountParNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisConcurrentMarkSweep": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisConcurrentMarkSweep",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/GcTimeMillisParNew": {
-              "metric": "jvm.JvmMetrics.GcTimeMillisParNew",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/LogFatal": {
-              "metric": "jvm.JvmMetrics.LogFatal",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemHeapMaxM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemMaxM": {
-              "metric": "jvm.JvmMetrics.MemMaxM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/jvm/JvmMetrics/MemNonHeapMaxM": {
-              "metric": "jvm.JvmMetrics.MemNonHeapMaxM",
-              "pointInTime": false,
-              "temporal": true
-            },
-            "metrics/tserver/general/active_majcs": {
-              "metric": "tserver.general.activeMajCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/active_mincs": {
-              "metric": "tserver.general.activeMinCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/entries": {
-              "metric": "tserver.general.entries",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/entries_in_mem": {
-              "metric": "tserver.general.entriesInMem",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/files_per_tablet": {
-              "metric": "tserver.general.filesPerTablet",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/hold_time": {
-              "metric": "tserver.general.holdTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/online_tablets": {
-              "metric": "tserver.general.onlineTablets",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/opening_tablets": {
-              "metric": "tserver.general.openingTablets",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/queries": {
-              "metric": "tserver.general.queries",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/queued_majcs": {
-              "metric": "tserver.general.queuedMajCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/queued_mincs": {
-              "metric": "tserver.general.queuedMinCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/total_mincs": {
-              "metric": "tserver.general.totalMinCs",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/general/unopened_tablets": {
-              "metric": "tserver.general.unopenedTablets",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_avg_count": {
-              "metric": "tserver.MinorCompactions.MincAvgCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_imax_count": {
-              "metric": "tserver.MinorCompactions.MincIMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_imin_count": {
-              "metric": "tserver.MinorCompactions.MincIMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_max_count": {
-              "metric": "tserver.MinorCompactions.MincMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_min_count": {
-              "metric": "tserver.MinorCompactions.MincMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_num_ops": {
-              "metric": "tserver.MinorCompactions.MincNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/minc_stdev_count": {
-              "metric": "tserver.MinorCompactions.MincStdevCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_avg_count": {
-              "metric": "tserver.MinorCompactions.QueueAvgCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_imax_count": {
-              "metric": "tserver.MinorCompactions.QueueIMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_imin_count": {
-              "metric": "tserver.MinorCompactions.QueueIMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_max_count": {
-              "metric": "tserver.MinorCompactions.QueueMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_min_count": {
-              "metric": "tserver.MinorCompactions.QueueMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_num_ops": {
-              "metric": "tserver.MinorCompactions.QueueNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/mincs/queue_stdev_count": {
-              "metric": "tserver.MinorCompactions.QueueStdevCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_avg_count": {
-              "metric": "tserver.Scans.ResultAvgCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_imax_count": {
-              "metric": "tserver.Scans.ResultIMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_imin_count": {
-              "metric": "tserver.Scans.ResultIMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_max_count": {
-              "metric": "tserver.Scans.ResultMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_min_count": {
-              "metric": "tserver.Scans.ResultMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_num_ops": {
-              "metric": "tserver.Scans.ResultNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/result_stdev_count": {
-              "metric": "tserver.Scans.ResultStdevCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_avg_count": {
-              "metric": "tserver.Scans.ScanAvgCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_imax_count": {
-              "metric": "tserver.Scans.ScanIMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_imin_count": {
-              "metric": "tserver.Scans.ScanIMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_max_count": {
-              "metric": "tserver.Scans.ScanMaxCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_min_count": {
-              "metric": "tserver.Scans.ScanMinCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_num_ops": {
-              "metric": "tserver.Scans.ScanNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/scans/scan_stdev_count": {
-              "metric": "tserver.Scans.ScanStdevCount",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_avg_time": {
-              "metric": "tserver.Updates.CommitPrepAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_imax_time": {
-              "metric": "tserver.Updates.CommitPrepIMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_imin_time": {
-              "metric": "tserver.Updates.CommitPrepIMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_max_time": {
-              "metric": "tserver.Updates.CommitPrepMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_min_time": {
-              "metric": "tserver.Updates.CommitPrepMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_num_ops": {
-              "metric": "tserver.Updates.CommitPrepNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_prep_stdev_time": {
-              "metric": "tserver.Updates.CommitPrepStdevTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_avg_time": {
-              "metric": "tserver.Updates.CommitTimeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_imax_time": {
-              "metric": "tserver.Updates.CommitTimeIMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_imin_time": {
-              "metric": "tserver.Updates.CommitTimeIMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_max_time": {
-              "metric": "tserver.Updates.CommitTimeMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_min_time": {
-              "metric": "tserver.Updates.CommitTimeMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_num_ops": {
-              "metric": "tserver.Updates.CommitTimeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/commit_time_stdev_time": {
-              "metric": "tserver.Updates.CommitTimeStdevTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_avg_size": {
-              "metric": "tserver.Updates.MutationArraysSizeAvgSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_imax_size": {
-              "metric": "tserver.Updates.MutationArraysSizeIMaxSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_imin_size": {
-              "metric": "tserver.Updates.MutationArraysSizeIMinSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_max_size": {
-              "metric": "tserver.Updates.MutationArraysSizeMaxSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_min_size": {
-              "metric": "tserver.Updates.MutationArraysSizeMinSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_num_ops": {
-              "metric": "tserver.Updates.MutationArraysSizeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/mut_arrays_stdev_size": {
-              "metric": "tserver.Updates.MutationArraysSizeStdevSize",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_avg_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_imax_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeIMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_imin_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeIMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_max_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeMaxTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_min_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeMinTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_num_ops": {
-              "metric": "tserver.Updates.WaLogWriteTimeNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/walog_write_stdev_time": {
-              "metric": "tserver.Updates.WaLogWriteTimeStdevTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/constraint_violations": {
-              "metric": "tserver.Updates.constraintViolations",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/permission_errors": {
-              "metric": "tserver.Updates.permissionErrors",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/updates/unknown_tablet_errors": {
-              "metric": "tserver.Updates.unknownTabletErrors",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/thrift/execute_avg_time": {
-              "metric": "thrift.TabletServer.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/thrift/execute_num_ops": {
-              "metric": "thrift.TabletServer.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/thrift/idle_avg_time": {
-              "metric": "thrift.TabletServer.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/thrift/idle_num_ops": {
-              "metric": "thrift.TabletServer.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/replication/thrift/servicer_handler_execute_avg_time": {
-              "metric": "thrift.ReplicationServicerHandler.ExecuteAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/replication/thrift/servicer_handler_execute_num_ops": {
-              "metric": "thrift.ReplicationServicerHandler.ExecuteNumOps",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/replication/thrift/servicer_handler_idle_avg_time": {
-              "metric": "thrift.ReplicationServicerHandler.IdleAvgTime",
-              "pointInTime": true,
-              "temporal": true
-            },
-            "metrics/tserver/replication/thrift/servicer_handler_idle_num_ops": {
-              "metric": "thrift.ReplicationServicerHandler.IdleNumOps",
-              "pointInTime": true,
-              "temporal": true
-            }
-          }
-        }
-      }
-    ]
-  }
-}
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/files/accumulo-metrics.xml b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/files/accumulo-metrics.xml
deleted file mode 100644
index 3b97809..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/files/accumulo-metrics.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!--
-  This file follows the conventions for XMLConfiguration files specified in the Apache Commons Configuration 1.5 Library. Changes to this file will be noticed
-  at runtime (see the FileChangedReloadingStrategy class in Commons Configuration).
--->
-<config>
-<!--
-   Metrics log directory
--->
-  <logging>
-    <dir>${ACCUMULO_HOME}/metrics</dir>
-  </logging>
-<!--
- Enable/Disable metrics accumulation on the different servers and their components
- NOTE: Turning on logging can be expensive because it will use several more file handles and will create a lot of short lived objects.
--->
-  <master>
-    <enabled type="boolean">false</enabled>
-    <logging type="boolean">false</logging>
-  </master>
-  <tserver>
-    <enabled type="boolean">false</enabled>
-    <logging type="boolean">false</logging>
-    <update>
-      <enabled type="boolean">false</enabled>
-      <logging type="boolean">false</logging>
-    </update>
-    <scan>
-      <enabled type="boolean">false</enabled>
-      <logging type="boolean">false</logging>
-    </scan>
-    <minc>
-      <enabled type="boolean">false</enabled>
-      <logging type="boolean">false</logging>
-    </minc>
-  </tserver>
-  <thrift>
-    <enabled type="boolean">false</enabled>
-    <logging type="boolean">false</logging>
-  </thrift>
-  <replication>
-    <enabled type="boolean">false</enabled>
-    <logging type="boolean">false</logging>
-  </replication>
-</config>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_client.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_client.py
deleted file mode 100644
index ae5181f..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_client.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.logger import Logger
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-
-from accumulo_configuration import setup_conf_dir
-
-
-class AccumuloClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    setup_conf_dir(name='client')
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    # this function should not execute if the version can't be determined or
-    # the stack does not support rolling upgrade
-    if not (params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted)):
-      return
-
-    Logger.info("Executing Accumulo Client Upgrade pre-restart")
-    stack_select.select_packages(params.version)
-
-if __name__ == "__main__":
-  AccumuloClient().execute()
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_configuration.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_configuration.py
deleted file mode 100644
index 86a2f6d..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_configuration.py
+++ /dev/null
@@ -1,384 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import Fail
-from resource_management.core.resources.system import Directory, Execute, File
-from resource_management.core.source import InlineTemplate, StaticFile
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.resources.properties_file import PropertiesFile
-from resource_management.libraries.resources.template_config import TemplateConfig
-from resource_management.libraries.resources.xml_config import XmlConfig
-
-def setup_conf_dir(name=None): # 'master' or 'tserver' or 'monitor' or 'gc' or 'tracer' or 'client'
-  import params
-
-  # check if confdir is a link
-  if not os.path.exists(params.conf_dir) or not os.path.islink(params.conf_dir):
-    raise Fail("confdir {} must be a symlink".format(params.conf_dir))
-
-  if name == 'client':
-    dest_conf_dir = params.conf_dir
-
-    # create a site file for client processes
-    configs = {}
-    configs.update(params.config['configurations']['accumulo-site'])
-    if "instance.secret" in configs:
-      configs.pop("instance.secret")
-    if "trace.token.property.password" in configs:
-      configs.pop("trace.token.property.password")
-    XmlConfig("accumulo-site.xml",
-              conf_dir = dest_conf_dir,
-              configurations = configs,
-              configuration_attributes=params.config['configurationAttributes']['accumulo-site'],
-              owner = params.accumulo_user,
-              group = params.user_group,
-              mode = 0644
-    )
-
-    # create env file
-    File(format("{dest_conf_dir}/accumulo-env.sh"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.accumulo_user,
-         content=InlineTemplate(params.env_sh_template)
-    )
-  else:
-    dest_conf_dir = params.server_conf_dir
-    # create server conf directory
-    Directory( params.server_conf_dir,
-               mode=0700,
-               owner = params.accumulo_user,
-               group = params.user_group,
-               create_parents = True
-    )
-    # create a site file for server processes
-    configs = {}
-    configs.update(params.config['configurations']['accumulo-site'])
-    configs["instance.secret"] = str(params.config['configurations']['accumulo-env']['instance_secret'])
-    configs["trace.token.property.password"] = str(params.trace_password)
-    XmlConfig( "accumulo-site.xml",
-               conf_dir = dest_conf_dir,
-               configurations = configs,
-               configuration_attributes=params.config['configurationAttributes']['accumulo-site'],
-               owner = params.accumulo_user,
-               group = params.user_group,
-               mode = 0600
-    )
-
-    # create pid dir
-    Directory( params.pid_dir,
-               owner = params.accumulo_user,
-               group = params.user_group,
-               create_parents = True,
-               cd_access = "a",
-               mode = 0755,
-    )
-
-    # create log dir
-    Directory (params.log_dir,
-               owner = params.accumulo_user,
-               group = params.user_group,
-               create_parents = True,
-               cd_access = "a",
-               mode = 0755,
-    )
-
-    # create env file
-    File(format("{dest_conf_dir}/accumulo-env.sh"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.accumulo_user,
-         content=InlineTemplate(params.server_env_sh_template)
-    )
-
-    if  params.security_enabled:
-      accumulo_TemplateConfig("accumulo_jaas.conf", dest_conf_dir)
-
-  # create client.conf file
-  configs = {}
-  if 'client' in params.config['configurations']:
-    configs.update(params.config['configurations']['client'])
-  configs["instance.name"] = params.instance_name
-  configs["instance.zookeeper.host"] = params.config['configurations']['accumulo-site']['instance.zookeeper.host']
-  copy_site_property(configs, 'instance.rpc.sasl.enabled')
-  copy_site_property(configs, 'rpc.sasl.qop')
-  copy_site_property(configs, 'rpc.useJsse')
-  copy_site_property(configs, 'instance.rpc.ssl.clientAuth')
-  copy_site_property(configs, 'instance.rpc.ssl.enabled')
-  copy_site_property(configs, 'instance.zookeeper.timeout')
-  copy_site_property(configs, 'trace.span.receivers')
-  copy_site_property(configs, 'trace.zookeeper.path')
-  for key,value in params.config['configurations']['accumulo-site'].iteritems():
-    if key.startswith("trace.span.receiver."):
-      configs[key] = value
-  PropertiesFile(format("{dest_conf_dir}/client.conf"),
-                 properties = configs,
-                 owner = params.accumulo_user,
-                 group = params.user_group
-  )
-
-  # create log4j.properties files
-  if (params.log4j_props != None):
-    File(format("{dest_conf_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.accumulo_user,
-         content=params.log4j_props
-    )
-  else:
-    File(format("{dest_conf_dir}/log4j.properties"),
-         mode=0644,
-         group=params.user_group,
-         owner=params.hbase_user
-    )
-
-  # create logging configuration files
-  accumulo_TemplateConfig("auditLog.xml", dest_conf_dir)
-  accumulo_TemplateConfig("generic_logger.xml", dest_conf_dir)
-  accumulo_TemplateConfig("monitor_logger.xml", dest_conf_dir)
-  accumulo_StaticFile("accumulo-metrics.xml", dest_conf_dir)
-
-  # create host files
-  accumulo_TemplateConfig("tracers", dest_conf_dir)
-  accumulo_TemplateConfig("gc", dest_conf_dir)
-  accumulo_TemplateConfig("monitor", dest_conf_dir)
-  accumulo_TemplateConfig("slaves", dest_conf_dir)
-  accumulo_TemplateConfig("masters", dest_conf_dir)
-
-  # metrics configuration
-  if params.has_metric_collector:
-    accumulo_TemplateConfig( "hadoop-metrics2-accumulo.properties", dest_conf_dir)
-
-  # other server setup
-  if name == 'master':
-    params.HdfsResource(format("/user/{params.accumulo_user}"),
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.accumulo_user,
-                         mode=0700
-    )
-    params.HdfsResource(format("{params.parent_dir}"),
-                         type="directory",
-                         action="create_on_execute",
-                         owner=params.accumulo_user,
-                         mode=0700
-    )
-    params.HdfsResource(None, action="execute")
-    if params.security_enabled and params.has_secure_user_auth:
-      Execute( format("{params.kinit_cmd} "
-                      "{params.daemon_script} init "
-                      "--user {params.accumulo_principal_name} "
-                      "--instance-name {params.instance_name} "
-                      "--clear-instance-name "
-                      ">{params.log_dir}/accumulo-init.out "
-                      "2>{params.log_dir}/accumulo-init.err"),
-               not_if=as_user(format("{params.kinit_cmd} "
-                                     "{params.hadoop_bin_dir}/hadoop --config "
-                                     "{params.hadoop_conf_dir} fs -stat "
-                                     "{params.instance_volumes}"),
-                              params.accumulo_user),
-               logoutput=True,
-               user=params.accumulo_user)
-    else:
-      passfile = format("{params.exec_tmp_dir}/pass")
-      try:
-        File(passfile,
-             mode=0600,
-             group=params.user_group,
-             owner=params.accumulo_user,
-             content=InlineTemplate('{{root_password}}\n'
-                                    '{{root_password}}\n\n')
-        )
-        Execute( format("cat {passfile} | {params.daemon_script} init "
-                        "--instance-name {params.instance_name} "
-                        "--clear-instance-name "
-                        ">{params.log_dir}/accumulo-init.out "
-                        "2>{params.log_dir}/accumulo-init.err"),
-                 not_if=as_user(format("{params.kinit_cmd} "
-                                       "{params.hadoop_bin_dir}/hadoop --config "
-                                       "{params.hadoop_conf_dir} fs -stat "
-                                       "{params.instance_volumes}"),
-                                params.accumulo_user),
-                 logoutput=True,
-                 user=params.accumulo_user)
-      finally:
-        File(passfile, action = "delete")
-
-  if name == 'tracer':
-    if params.security_enabled and params.has_secure_user_auth:
-      Execute( format("{params.kinit_cmd} "
-                      "{params.daemon_script} init --reset-security "
-                      "--user {params.accumulo_principal_name} "
-                      "--password NA "
-                      ">{params.log_dir}/accumulo-reset.out "
-                      "2>{params.log_dir}/accumulo-reset.err"),
-               not_if=as_user(format("{params.kinit_cmd} "
-                                     "{params.daemon_script} shell -e "
-                                     "\"userpermissions -u "
-                                     "{params.accumulo_principal_name}\" | "
-                                     "grep System.CREATE_TABLE"),
-                              params.accumulo_user),
-               user=params.accumulo_user)
-      create_user(params.smokeuser_principal, params.smoke_test_password)
-    else:
-      # do not try to reset security in nonsecure mode, for now
-      # Execute( format("{params.daemon_script} init --reset-security "
-      #                 "--user root "
-      #                 ">{params.log_dir}/accumulo-reset.out "
-      #                 "2>{params.log_dir}/accumulo-reset.err"),
-      #          not_if=as_user(format("cat {rpassfile} | "
-      #                                "{params.daemon_script} shell -e "
-      #                                "\"userpermissions -u root\" | "
-      #                                "grep System.CREATE_TABLE"),
-      #                         params.accumulo_user),
-      #          user=params.accumulo_user)
-      create_user(params.smoke_test_user, params.smoke_test_password)
-    create_user(params.trace_user, params.trace_password)
-    rpassfile = format("{params.exec_tmp_dir}/pass0")
-    cmdfile = format("{params.exec_tmp_dir}/resetcmds")
-    try:
-      File(cmdfile,
-           mode=0600,
-           group=params.user_group,
-           owner=params.accumulo_user,
-           content=InlineTemplate('grant -t trace -u {{trace_user}} Table.ALTER_TABLE\n'
-                                  'grant -t trace -u {{trace_user}} Table.READ\n'
-                                  'grant -t trace -u {{trace_user}} Table.WRITE\n\n')
-      )
-      if params.security_enabled and params.has_secure_user_auth:
-        Execute( format("{params.kinit_cmd} {params.daemon_script} shell -f "
-                        "{cmdfile}"),
-                 only_if=as_user(format("{params.kinit_cmd} "
-                                        "{params.daemon_script} shell "
-                                        "-e \"table trace\""),
-                                 params.accumulo_user),
-                 not_if=as_user(format("{params.kinit_cmd} "
-                                       "{params.daemon_script} shell "
-                                       "-e \"userpermissions -u "
-                                       "{params.trace_user} | "
-                                       "grep Table.READ | grep trace"),
-                                params.accumulo_user),
-                 user=params.accumulo_user)
-      else:
-        File(rpassfile,
-             mode=0600,
-             group=params.user_group,
-             owner=params.accumulo_user,
-             content=InlineTemplate('{{root_password}}\n\n')
-        )
-        Execute( format("cat {rpassfile} | {params.daemon_script} shell -f "
-                        "{cmdfile} -u root"),
-                 only_if=as_user(format("cat {rpassfile} | "
-                                       "{params.daemon_script} shell -u root "
-                                       "-e \"table trace\""),
-                                params.accumulo_user),
-                 not_if=as_user(format("cat {rpassfile} | "
-                                       "{params.daemon_script} shell -u root "
-                                       "-e \"userpermissions -u "
-                                       "{params.trace_user} | "
-                                       "grep Table.READ | grep trace"),
-                                params.accumulo_user),
-                 user=params.accumulo_user)
-    finally:
-      try_remove(rpassfile)
-      try_remove(cmdfile)
-
-def copy_site_property(configs, name):
-  import params
-  if name in params.config['configurations']['accumulo-site']:
-    configs[name] = params.config['configurations']['accumulo-site'][name]
-
-def create_user(user, password):
-  import params
-  rpassfile = format("{params.exec_tmp_dir}/pass0")
-  passfile = format("{params.exec_tmp_dir}/pass")
-  cmdfile = format("{params.exec_tmp_dir}/cmds")
-  try:
-    File(cmdfile,
-         mode=0600,
-         group=params.user_group,
-         owner=params.accumulo_user,
-         content=InlineTemplate(format("createuser {user}\n"
-                                       "grant -s System.CREATE_TABLE -u {user}\n\n"))
-    )
-    if params.security_enabled and params.has_secure_user_auth:
-      Execute( format("{params.kinit_cmd} {params.daemon_script} shell -f "
-                      "{cmdfile}"),
-               not_if=as_user(format("{params.kinit_cmd} "
-                                     "{params.daemon_script} shell "
-                                     "-e \"userpermissions -u {user}\""),
-                              params.accumulo_user),
-               user=params.accumulo_user)
-    else:
-      File(rpassfile,
-           mode=0600,
-           group=params.user_group,
-           owner=params.accumulo_user,
-           content=InlineTemplate('{{root_password}}\n\n')
-      )
-      File(passfile,
-           mode=0600,
-           group=params.user_group,
-           owner=params.accumulo_user,
-           content=InlineTemplate(format("{params.root_password}\n"
-                                         "{password}\n"
-                                         "{password}\n\n"))
-      )
-      Execute( format("cat {passfile} | {params.daemon_script} shell -u root "
-                      "-f {cmdfile}"),
-               not_if=as_user(format("cat {rpassfile} | "
-                                     "{params.daemon_script} shell -u root "
-                                     "-e \"userpermissions -u {user}\""),
-                              params.accumulo_user),
-               user=params.accumulo_user)
-  finally:
-    try_remove(rpassfile)
-    try_remove(passfile)
-    try_remove(cmdfile)
-
-def try_remove(file):
-  try:
-    os.remove(file)
-  except:
-    pass
-
-# create file 'name' from template
-def accumulo_TemplateConfig(name, dest_conf_dir, tag=None):
-  import params
-
-  TemplateConfig( format("{dest_conf_dir}/{name}"),
-      owner = params.accumulo_user,
-      group = params.user_group,
-      template_tag = tag
-  )
-
-# create static file 'name'
-def accumulo_StaticFile(name, dest_conf_dir):
-  import params
-
-  File(format("{dest_conf_dir}/{name}"),
-    mode=0644,
-    group=params.user_group,
-    owner=params.accumulo_user,
-    content=StaticFile(name)
-  )
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_gc.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_gc.py
deleted file mode 100644
index f8fe499..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_gc.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from accumulo_script import AccumuloScript
-
-if __name__ == "__main__":
-  AccumuloScript('gc').execute()
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_master.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_master.py
deleted file mode 100644
index ea8935b..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_master.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from accumulo_script import AccumuloScript
-
-if __name__ == "__main__":
-  AccumuloScript('master').execute()
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_monitor.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_monitor.py
deleted file mode 100644
index c8e7bed..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_monitor.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from accumulo_script import AccumuloScript
-
-if __name__ == "__main__":
-  AccumuloScript('monitor').execute()
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
deleted file mode 100644
index 2a95820..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_script.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import check_process_status
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.security_commons import build_expectations
-from resource_management.libraries.functions.security_commons import cached_kinit_executor
-from resource_management.libraries.functions.security_commons import get_params_from_filesystem
-from resource_management.libraries.functions.security_commons import validate_security_config_properties
-from resource_management.libraries.functions.security_commons import FILE_TYPE_XML
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-
-from accumulo_configuration import setup_conf_dir
-from accumulo_service import accumulo_service
-
-class AccumuloScript(Script):
-
-  def __init__(self, component):
-    Script.__init__(self)
-    self.component = component
-
-
-  def install(self, env):
-    self.install_packages(env)
-
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    setup_conf_dir(name=self.component)
-
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    accumulo_service( self.component, action = 'start')
-
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    accumulo_service( self.component, action = 'stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    pid_file = self.get_pid_files()[0]
-    check_process_status(pid_file)
-
-
-  def pre_upgrade_restart(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    # this function should not execute if the version can't be determined or
-    # the stack does not support rolling upgrade
-    if not (params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted)):
-      return
-
-    stack_component = stack_select.get_package_name()
-
-    Logger.info("Executing Accumulo Upgrade pre-restart for {0}".format(stack_component))
-    stack_select.select_packages(params.version)
-
-  def get_log_folder(self):
-    import params
-    return params.log_dir
-
-  def get_pid_files(self):
-    import status_params
-
-    pid_file = "{pid_dir}/accumulo-{accumulo_user}-{component}.pid".format(
-        pid_dir = status_params.pid_dir,
-        accumulo_user = status_params.accumulo_user,
-        component = self.component)
-    return [pid_file]
-
-  def get_user(self):
-    import params
-    return params.accumulo_user
-
-if __name__ == "__main__":
-  AccumuloScript().fail_with_error('component unspecified')
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_service.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_service.py
deleted file mode 100644
index d335adb..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_service.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.core.resources.system import Directory, Execute
-from resource_management.core.shell import as_user
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.libraries.functions.format import format
-import time
-import os
-
-def accumulo_service( name,
-                      action = 'start'): # 'start' or 'stop' or 'status'
-    import params
-
-    role = name
-    pid_file = format("{pid_dir}/accumulo-{accumulo_user}-{role}.pid")
-
-    pid_exists = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-
-    if action == 'start':
-      Directory(os.path.expanduser(format("~{accumulo_user}")),
-                owner = params.accumulo_user,
-                group = params.user_group,
-                recursive_ownership = True
-      )
-      
-      if name != 'tserver':
-        Execute(format("{daemon_script} org.apache.accumulo.master.state.SetGoalState NORMAL"),
-                not_if=as_user(pid_exists, params.accumulo_user),
-                user=params.accumulo_user
-        )
-      address = params.hostname
-      if name == 'monitor' and params.accumulo_monitor_bind_all:
-        address = '0.0.0.0'
-      daemon_cmd = format("{daemon_script} {role} --address {address} > {log_dir}/accumulo-{role}.out 2>{log_dir}/accumulo-{role}.err & echo $! > {pid_file}")
-      try:
-        Execute ( daemon_cmd,
-          not_if=as_user(pid_exists, params.accumulo_user),
-          user=params.accumulo_user
-        )
-      except:
-        show_logs(params.log_dir, params.accumulo_user)
-        raise
-
-    elif action == 'stop':
-      no_pid_exists = format("! ({pid_exists})")
-
-      pid = format("`cat {pid_file}` >/dev/null 2>&1")
-      Execute(format("kill {pid}"),
-        not_if=as_user(no_pid_exists, params.accumulo_user),
-        user=params.accumulo_user
-      )
-      Execute(format("kill -9 {pid}"),
-        not_if=as_user(format("sleep 2; {no_pid_exists} || sleep 20; {no_pid_exists}"), params.accumulo_user),
-        ignore_failures=True,
-        user=params.accumulo_user
-      )
-      Execute(format("rm -f {pid_file}"),
-        user=params.accumulo_user)
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_tserver.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_tserver.py
deleted file mode 100644
index 3117e35..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/accumulo_tserver.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from accumulo_script import AccumuloScript
-
-if __name__ == "__main__":
-  AccumuloScript('tserver').execute()
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
deleted file mode 100644
index 80cb4c2..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/params.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.get_bare_principal import get_bare_principal
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions import StackFeature
-from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
-
-import status_params
-
-# server configurations
-config = Script.get_config()
-stack_root = status_params.stack_root
-exec_tmp_dir = status_params.tmp_dir
-
-# security enabled
-security_enabled = status_params.security_enabled
-
-# stack name
-stack_name = status_params.stack_name
-
-# stack version
-version = default("/commandParams/version", None)
-stack_version_unformatted = config['clusterLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-has_secure_user_auth = False
-if stack_version_formatted and \
-    check_stack_feature(StackFeature.ACCUMULO_KERBEROS_USER_AUTH, stack_version_formatted):
-  has_secure_user_auth = True
-
-# configuration directories
-conf_dir = status_params.conf_dir
-server_conf_dir = status_params.server_conf_dir
-
-# service locations
-hadoop_prefix = stack_select.get_hadoop_dir("home")
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-zookeeper_home = format("{stack_root}/current/zookeeper-client")
-
-# the configuration direction for HDFS/YARN/MapR is the hadoop config
-# directory, which is symlinked by hadoop-client only
-hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-
-# accumulo local directory structure
-log_dir = config['configurations']['accumulo-env']['accumulo_log_dir']
-client_script = format("{stack_root}/current/accumulo-client/bin/accumulo")
-daemon_script = format("ACCUMULO_CONF_DIR={server_conf_dir} {client_script}")
-
-# user and status
-accumulo_user = status_params.accumulo_user
-user_group = config['configurations']['cluster-env']['user_group']
-pid_dir = status_params.pid_dir
-
-# accumulo env
-java64_home = config['ambariLevelParams']['java_home']
-accumulo_master_heapsize = config['configurations']['accumulo-env']['accumulo_master_heapsize']
-accumulo_tserver_heapsize = config['configurations']['accumulo-env']['accumulo_tserver_heapsize']
-accumulo_monitor_heapsize = config['configurations']['accumulo-env']['accumulo_monitor_heapsize']
-accumulo_gc_heapsize = config['configurations']['accumulo-env']['accumulo_gc_heapsize']
-accumulo_other_heapsize = config['configurations']['accumulo-env']['accumulo_other_heapsize']
-accumulo_monitor_bind_all = config['configurations']['accumulo-env']['accumulo_monitor_bind_all']
-monitor_bind_str = "false"
-if accumulo_monitor_bind_all:
-  monitor_bind_str = "true"
-env_sh_template = config['configurations']['accumulo-env']['content']
-server_env_sh_template = config['configurations']['accumulo-env']['server_content']
-
-# accumulo initialization parameters
-instance_name = config['configurations']['accumulo-env']['accumulo_instance_name']
-instance_secret = config['configurations']['accumulo-env']['instance_secret']
-root_password = config['configurations']['accumulo-env']['accumulo_root_password']
-instance_volumes = config['configurations']['accumulo-site']['instance.volumes']
-parent_dir = instance_volumes[0:instance_volumes.rfind('/')]
-
-# tracer properties
-trace_user = config['configurations']['accumulo-site']['trace.user']
-trace_password = config['configurations']['accumulo-env']['trace_password']
-
-# credential provider
-credential_provider = parent_dir.replace("hdfs://", "jceks://hdfs@") + "/accumulo-site.jceks"
-
-# smoke test
-smoke_test_user = config['configurations']['cluster-env']['smokeuser']
-smoke_test_password = 'smoke'
-smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-
-# log4j.properties
-log4j_props = config['configurations']['accumulo-log4j']['content']
-audit_log_level = config['configurations']['accumulo-log4j']['audit_log_level']
-monitor_forwarding_log_level = config['configurations']['accumulo-log4j']['monitor_forwarding_log_level']
-debug_log_size = config['configurations']['accumulo-log4j']['debug_log_size']
-debug_num_logs = config['configurations']['accumulo-log4j']['debug_num_logs']
-info_log_size = config['configurations']['accumulo-log4j']['info_log_size']
-info_num_logs = config['configurations']['accumulo-log4j']['info_num_logs']
-
-# metrics2 properties
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_hosts', []) # is not passed when ganglia is not present
-ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-
-set_instanceId = "false"
-cluster_name = config["clusterName"]
-
-if 'cluster-env' in config['configurations'] and \
-        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
-  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
-  set_instanceId = "true"
-else:
-  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
-has_metric_collector = not len(ams_collector_hosts) == 0
-if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_external_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
-    if metric_collector_web_address.find(':') != -1:
-      metric_collector_port = metric_collector_web_address.split(':')[1]
-    else:
-      metric_collector_port = '6188'
-  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-    metric_collector_protocol = 'https'
-  else:
-    metric_collector_protocol = 'http'
-  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-  metric_legacy_hadoop_sink = check_stack_feature(StackFeature.AMS_LEGACY_HADOOP_SINK, stack_version_formatted)
-  pass
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
-host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
-host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
-is_aggregation_https_enabled = False
-if default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-  host_in_memory_aggregation_protocol = 'https'
-  is_aggregation_https_enabled = True
-else:
-  host_in_memory_aggregation_protocol = 'http'
-
-# if accumulo is selected accumulo_tserver_hosts should not be empty, but still default just in case
-if 'slave_hosts' in config['clusterHostInfo']:
-  tserver_hosts = default('/clusterHostInfo/accumulo_tserver_hosts', '/clusterHostInfo/datanode_hosts')
-else:
-  tserver_hosts = default('/clusterHostInfo/accumulo_tserver_hosts', '/clusterHostInfo/all_hosts')
-master_hosts = default('/clusterHostInfo/accumulo_master_hosts', [])
-monitor_hosts = default('/clusterHostInfo/accumulo_monitor_hosts', [])
-gc_hosts = default('/clusterHostInfo/accumulo_gc_hosts', [])
-tracer_hosts = default('/clusterHostInfo/accumulo_tracer_hosts', [])
-hostname = status_params.hostname
-
-# security properties
-accumulo_user_keytab = config['configurations']['accumulo-env']['accumulo_user_keytab']
-accumulo_principal_name = config['configurations']['accumulo-env']['accumulo_principal_name']
-
-# kinit properties
-kinit_path_local = status_params.kinit_path_local
-if security_enabled:
-  bare_accumulo_principal = get_bare_principal(config['configurations']['accumulo-site']['general.kerberos.principal'])
-  kinit_cmd = format("{kinit_path_local} -kt {accumulo_user_keytab} {accumulo_principal_name};")
-  general_kerberos_keytab = config['configurations']['accumulo-site']['general.kerberos.keytab']
-  general_kerberos_principal = config['configurations']['accumulo-site']['general.kerberos.principal'].replace('_HOST', hostname.lower())
-  accumulo_jaas_file = format("{server_conf_dir}/accumulo_jaas.conf")
-else:
-  kinit_cmd = ""
-
-#for create_hdfs_directory
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-
-
-
-hdfs_site = config['configurations']['hdfs-site']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-dfs_type = default("/clusterLevelParams/dfs_type", "")
-
-# dfs.namenode.https-address
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
-)
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/service_check.py
deleted file mode 100644
index e23e18b..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/service_check.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.core.resources.system import Execute, File
-from resource_management.core.source import InlineTemplate
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.script.script import Script
-
-class AccumuloServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};") if params.security_enabled else ""
-
-    cmdfile = format("{exec_tmp_dir}/cmds")
-    try:
-      File(cmdfile,
-           mode=0600,
-           group=params.user_group,
-           owner=params.smoke_test_user,
-           content=InlineTemplate('createtable testtable\n'
-                                  'insert row cf cq val\n'
-                                  'scan\n'
-                                  'flush -w\n'
-                                  'scan\n'
-                                  'deletetable -f testtable\n\n')
-      )
-      if params.security_enabled and params.has_secure_user_auth:
-        cmd = format("{smokeuser_kinit_cmd} "
-                        "{client_script} shell -f {cmdfile}")
-      else:
-        cmd = format("{client_script} shell -u {smoke_test_user} "
-                        "-p {smoke_test_password} -f {cmdfile}")
-      Execute(cmd,
-              timeout=120,
-              user=params.smoke_test_user,
-              logoutput=True)
-    finally:
-      try_remove(cmdfile)
-
-def try_remove(file):
-  try:
-    os.remove(file)
-  except:
-    pass
-
-if __name__ == "__main__":
-  AccumuloServiceCheck().execute()
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/status_params.py
deleted file mode 100644
index 9baee3c..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/scripts/status_params.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.script.script import Script
-
-# a map of the Ambari role to the component name
-# for use with <stack-root>/current/<component>
-SERVER_ROLE_DIRECTORY_MAP = {
-  'ACCUMULO_MASTER' : 'accumulo-master',
-  'ACCUMULO_MONITOR' : 'accumulo-monitor',
-  'ACCUMULO_GC' : 'accumulo-gc',
-  'ACCUMULO_TRACER' : 'accumulo-tracer',
-  'ACCUMULO_TSERVER' : 'accumulo-tablet',
-  'ACCUMULO_CLIENT' : 'accumulo-client'
-}
-
-component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "ACCUMULO_CLIENT")
-
-config = Script.get_config()
-stack_root = Script.get_stack_root()
-
-conf_dir = format('{stack_root}/current/{component_directory}/conf')
-server_conf_dir = format('{conf_dir}/server')
-pid_dir = config['configurations']['accumulo-env']['accumulo_pid_dir']
-accumulo_user = config['configurations']['accumulo-env']['accumulo_user']
-
-# Security related/required params
-hostname = config['agentLevelParams']['hostname']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-tmp_dir = Script.get_tmp_dir()
-
-# stack name
-stack_name = default("/clusterLevelParams/stack_name", None)
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/accumulo_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/accumulo_jaas.conf.j2
deleted file mode 100644
index 1ac5cea..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/accumulo_jaas.conf.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-com.sun.security.jgss.krb5.initiate {
-com.sun.security.auth.module.Krb5LoginModule required
-renewTGT=false
-doNotPrompt=true
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-debug=true
-keyTab="{{general_kerberos_keytab}}"
-principal="{{general_kerberos_principal}}";
-};
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/auditLog.xml.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/auditLog.xml.j2
deleted file mode 100644
index 020bc1d..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/auditLog.xml.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-
-    <!--  Write out Audit info to an Audit file -->
-    <appender name="Audit" class="org.apache.log4j.DailyRollingFileAppender">
-        <param name="File"           value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.ip.localhost.hostname}.audit"/>
-        <param name="MaxBackupIndex" value="10"/>
-        <param name="DatePattern" value="'.'yyyy-MM-dd"/>
-        <layout class="org.apache.log4j.PatternLayout">
-            <param name="ConversionPattern" value="%d{yyyy-MM-dd HH:mm:ss,SSS/Z} [%c{2}] %-5p: %m%n"/>
-        </layout>
-    </appender>
-    <logger name="Audit"  additivity="false">
-        <appender-ref ref="Audit" />
-        <level value="{{audit_log_level}}"/>
-    </logger>
-
-</log4j:configuration>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/gc.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/gc.j2
deleted file mode 100644
index 1f55ff5..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/gc.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in gc_hosts %}{{host}}
-{% endfor %}
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/generic_logger.xml.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/generic_logger.xml.j2
deleted file mode 100644
index c2bb323..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/generic_logger.xml.j2
+++ /dev/null
@@ -1,83 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-
-  <!-- Write out everything at the DEBUG level to the debug log -->
-  <appender name="A2" class="org.apache.log4j.RollingFileAppender">
-     <param name="File"           value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.debug.log"/>
-     <param name="MaxFileSize"    value="{{debug_log_size}}"/>
-     <param name="MaxBackupIndex" value="{{debug_num_logs}}"/>
-     <param name="Threshold"      value="DEBUG"/>
-     <layout class="org.apache.log4j.PatternLayout">
-       <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %m%n"/>
-     </layout>
-  </appender>
-
-  <!--  Write out INFO and higher to the regular log -->
-  <appender name="A3" class="org.apache.log4j.RollingFileAppender">
-     <param name="File"           value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.log"/>
-     <param name="MaxFileSize"    value="{{info_log_size}}"/>
-     <param name="MaxBackupIndex" value="{{info_num_logs}}"/>
-     <param name="Threshold"      value="INFO"/>
-     <layout class="org.apache.log4j.PatternLayout">
-       <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %m%n"/>
-     </layout>
-  </appender>
-
-  <!-- Send all logging data to a centralized logger -->
-  <appender name="N1" class="org.apache.log4j.net.SocketAppender">
-     <param name="remoteHost"     value="${org.apache.accumulo.core.host.log}"/>
-     <param name="port"           value="${org.apache.accumulo.core.host.log.port}"/>
-     <param name="application"    value="${org.apache.accumulo.core.application}:${org.apache.accumulo.core.ip.localhost.hostname}"/>
-     <param name="Threshold"      value="{{monitor_forwarding_log_level}}"/>
-  </appender>
-
-  <!--  If the centralized logger is down, buffer the log events, but drop them if it stays down -->
-  <appender name="ASYNC" class="org.apache.log4j.AsyncAppender">
-     <appender-ref ref="N1" />
-  </appender>
-
-  <!-- Log accumulo events to the debug, normal and remote logs. -->
-  <logger name="org.apache.accumulo" additivity="false">
-     <level value="DEBUG"/>
-     <appender-ref ref="A2" />
-     <appender-ref ref="A3" />
-     <appender-ref ref="ASYNC" />
-  </logger>
-
-  <logger name="org.apache.accumulo.core.file.rfile.bcfile">
-     <level value="INFO"/>
-  </logger>
-
-  <logger name="org.mortbay.log">
-     <level value="WARN"/>
-  </logger>
-
-  <logger name="org.apache.zookeeper">
-     <level value="ERROR"/>
-  </logger>
-
-  <!-- Log non-accumulo events to the debug and normal logs. -->
-  <root>
-     <level value="INFO"/>
-     <appender-ref ref="A2" />
-     <appender-ref ref="A3" />
-  </root>
-
-</log4j:configuration>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
deleted file mode 100644
index 13def8e..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/hadoop-metrics2-accumulo.properties.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Poll collectors every {{metrics_report_interval}} seconds
-*.period={{metrics_collection_period}}
-
-*.host_in_memory_aggregation = {{host_in_memory_aggregation}}
-*.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
-{% if is_aggregation_https_enabled %}
-*.host_in_memory_aggregation_protocol = {{host_in_memory_aggregation_protocol}}
-{% endif %}
-
-{% if has_metric_collector %}
-
-{% if metric_legacy_hadoop_sink %}
-*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink-legacy.jar
-{% else %}
-*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-{% endif %}
-*.sink.timeline.slave.host.name={{hostname}}
-accumulo.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-accumulo.period={{metrics_collection_period}}
-accumulo.collector.hosts={{ams_collector_hosts}}
-accumulo.port={{metric_collector_port}}
-
-jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-jvm.period={{metrics_collection_period}}
-jvm.collector.hosts={{ams_collector_hosts}}
-jvm.port={{metric_collector_port}}
-
-rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-rpc.period={{metrics_collection_period}}
-rpc.collector.hosts={{ams_collector_hosts}}
-rpc.port={{metric_collector_port}}
-
-accumulo.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-accumulo.sink.timeline.period={{metrics_collection_period}}
-accumulo.sink.timeline.sendInterval={{metrics_report_interval}}000
-accumulo.sink.timeline.collector.hosts={{ams_collector_hosts}}
-accumulo.sink.timeline.port={{metric_collector_port}}
-
-# HTTPS properties
-accumulo.sink.timeline.truststore.path = {{metric_truststore_path}}
-accumulo.sink.timeline.truststore.type = {{metric_truststore_type}}
-accumulo.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-{% else %}
-
-accumulo.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-accumulo.sink.ganglia.period=10
-accumulo.sink.ganglia.servers={{ganglia_server_host}}:8666
-
-{% endif %}
-
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/masters.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/masters.j2
deleted file mode 100644
index e0f7fa3..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/masters.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in master_hosts %}{{host}}
-{% endfor %}
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/monitor.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/monitor.j2
deleted file mode 100644
index 9281157..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/monitor.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in monitor_hosts %}{{host}}
-{% endfor %}
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/monitor_logger.xml.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/monitor_logger.xml.j2
deleted file mode 100644
index 952a9d2..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/monitor_logger.xml.j2
+++ /dev/null
@@ -1,64 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
-<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
-
-  <!-- Write out everything at the DEBUG level to the debug log -->
-  <appender name="A2" class="org.apache.log4j.RollingFileAppender">
-     <param name="File"           value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.debug.log"/>
-     <param name="MaxFileSize"    value="100MB"/>
-     <param name="MaxBackupIndex" value="10"/>
-     <param name="Threshold"      value="DEBUG"/>
-     <layout class="org.apache.log4j.PatternLayout">
-       <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n"/>
-     </layout>
-  </appender>
-
-  <!--  Write out INFO and higher to the regular log -->
-  <appender name="A3" class="org.apache.log4j.RollingFileAppender">
-     <param name="File"           value="${org.apache.accumulo.core.dir.log}/${org.apache.accumulo.core.application}_${org.apache.accumulo.core.ip.localhost.hostname}.log"/>
-     <param name="MaxFileSize"    value="100MB"/>
-     <param name="MaxBackupIndex" value="10"/>
-     <param name="Threshold"      value="INFO"/>
-     <layout class="org.apache.log4j.PatternLayout">
-       <param name="ConversionPattern" value="%d{ISO8601} [%-8c{2}] %-5p: %X{application} %m%n"/>
-     </layout>
-  </appender>
-
-  <!-- Keep the last few log messages for display to the user -->
-  <appender name="GUI" class="org.apache.accumulo.server.monitor.LogService">
-     <param name="keep"      value="40"/>
-     <param name="Threshold" value="{{monitor_forwarding_log_level}}"/>
-  </appender>
-
-  <!-- Log accumulo messages to debug, normal and GUI -->
-  <logger name="org.apache.accumulo" additivity="false">
-     <level value="DEBUG"/>
-     <appender-ref ref="A2" />
-     <appender-ref ref="A3" />
-     <appender-ref ref="GUI" />
-  </logger>
-
-  <!-- Log non-accumulo messages to debug, normal logs. -->
-  <root>
-     <level value="INFO"/>
-     <appender-ref ref="A2" />
-     <appender-ref ref="A3" />
-  </root>
-
-</log4j:configuration>
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/slaves.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/slaves.j2
deleted file mode 100644
index 5473d6e..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/slaves.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in tserver_hosts %}{{host}}
-{% endfor %}
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/tracers.j2 b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/tracers.j2
deleted file mode 100644
index 73f5356..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/package/templates/tracers.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in tracer_hosts %}{{host}}
-{% endfor %}
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/role_command_order.json b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/role_command_order.json
deleted file mode 100644
index c06aba1..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/role_command_order.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-  "general_deps" : {
-    "_comment" : "dependencies for ACCUMUMLO",
-    "ACCUMULO_MASTER-START": ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START"],
-    "ACCUMULO_TSERVER-START": ["ACCUMULO_MASTER-START"],
-    "ACCUMULO_MONITOR-START": ["ACCUMULO_MASTER-START"],
-    "ACCUMULO_GC-START": ["ACCUMULO_MASTER-START"],
-    "ACCUMULO_TRACER-START": ["ACCUMULO_MASTER-START", "ACCUMULO_TSERVER-START"],
-    "ACCUMULO_MONITOR-STOP" : ["ACCUMULO_MASTER-STOP"],
-    "ACCUMULO_MASTER-STOP" : ["ACCUMULO_TSERVER-STOP"],
-    "ACCUMULO_TSERVER-STOP" : ["ACCUMULO_TRACER-STOP", "ACCUMULO_GC-STOP"],
-    "ACCUMULO_SERVICE_CHECK-SERVICE_CHECK": ["ACCUMULO_MASTER-START", "ACCUMULO_TSERVER-START", "ACCUMULO_TRACER-START"]
-  }
-}
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/themes/credentials.json b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/themes/credentials.json
deleted file mode 100644
index f05f06c..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/themes/credentials.json
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-  "name": "credentials",
-  "configuration": {
-    "placement": {
-      "configs": [
-        {
-          "config": "accumulo-env/instance_secret",
-          "subsection-name": "subsection-accumulo-instance-secret"
-        },
-        {
-          "config": "accumulo-env/accumulo_instance_name",
-          "subsection-name": "subsection-accumulo-instance-secret"
-        },
-        {
-          "config": "accumulo-env/accumulo_root_password",
-          "subsection-name": "subsection-accumulo-root"
-        },
-        {
-          "config": "accumulo-env/accumulo_instance_name",
-          "subsection-name": "subsection-accumulo-root"
-        }
-      ],
-      "configuration-layout": "credentials"
-    },
-    "widgets": [],
-    "layouts": [
-      {
-        "name": "credentials",
-        "tabs": [
-          {
-            "name": "credentials",
-            "layout": {
-              "sections": [
-                {
-                  "subsections": [
-                    {
-                      "name": "subsection-accumulo-root",
-                      "display-name": "Accumulo Root"
-                    },
-                    {
-                      "name": "subsection-accumulo-instance-secret",
-                      "display-name": "Accumulo Instance Secret"
-                    }
-                  ],
-                  "name": "credentials"
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ]
-  }
-}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/themes/directories.json b/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/themes/directories.json
deleted file mode 100644
index 2e4c1d7..0000000
--- a/ambari-server/src/main/resources/common-services/ACCUMULO/1.6.1.2.2.0/themes/directories.json
+++ /dev/null
@@ -1,88 +0,0 @@
-{
-  "name": "directories",
-  "description": "Directories theme for ACCUMULO service",
-  "configuration": {
-    "layouts": [
-      {
-        "name": "directories",
-        "tabs": [
-          {
-            "name": "directories",
-            "display-name": "Directories",
-            "layout": {
-              "tab-columns": "1",
-              "tab-rows": "4",
-              "sections": [
-                {
-                  "name": "subsection-log-dirs",
-                  "display-name": "LOG DIRS",
-                  "row-index": "0",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-log-dirs",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                },
-                {
-                  "name": "subsection-pid-dirs",
-                  "display-name": "PID DIRS",
-                  "row-index": "1",
-                  "column-index": "0",
-                  "row-span": "1",
-                  "column-span": "1",
-                  "section-columns": "1",
-                  "section-rows": "1",
-                  "subsections": [
-                    {
-                      "name": "subsection-pid-dirs",
-                      "row-index": "0",
-                      "column-index": "0",
-                      "row-span": "1",
-                      "column-span": "1"
-                    }
-                  ]
-                }
-              ]
-            }
-          }
-        ]
-      }
-    ],
-    "placement": {
-      "configuration-layout": "default",
-      "configs": [
-        {
-          "config": "accumulo-env/accumulo_log_dir",
-          "subsection-name": "subsection-log-dirs"
-        },
-        {
-          "config": "accumulo-env/accumulo_pid_dir",
-          "subsection-name": "subsection-pid-dirs"
-        }
-      ]
-    },
-    "widgets": [
-      {
-        "config": "accumulo-env/accumulo_log_dir",
-        "widget": {
-          "type": "text-field"
-        }
-      },
-      {
-        "config": "accumulo-env/accumulo_pid_dir",
-        "widget": {
-          "type": "text-field"
-        }
-      }
-    ]
-  }
-}
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/alerts.json b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/alerts.json
deleted file mode 100644
index 55c21b6..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/alerts.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
-  "AMBARI_INFRA_SOLR": {
-    "INFRA_SOLR": [
-      {
-        "name": "infra_solr",
-        "label": "Infra Solr Web UI",
-        "description": "This host-level alert is triggered if the Solr Cloud Instance is unreachable.",
-        "interval": 1,
-        "scope": "ANY",
-        "source": {
-          "type": "WEB",
-          "uri": {
-            "http": "{{infra-solr-env/infra_solr_port}}",
-            "https": "{{infra-solr-env/infra_solr_port}}",
-            "https_property": "{{infra-solr-env/infra_solr_ssl_enabled}}",
-            "https_property_value": "true",
-            "connection_timeout": 5.0,
-            "kerberos_keytab": "{{infra-solr-env/infra_solr_web_kerberos_keytab}}",
-            "kerberos_principal": "{{infra-solr-env/infra_solr_web_kerberos_principal}}",
-            "default_port": 8886
-          },
-          "reporting": {
-            "ok": {
-              "text": "HTTP {0} response in {2:.3f}s"
-            },
-            "warning": {
-              "text": "HTTP {0} response from {1} in {2:.3f}s ({3})"
-            },
-            "critical": {
-              "text": "Connection failed to {1} ({3})"
-            }
-          }
-        }
-      }
-    ]
-  }
-}
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-client-log4j.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-client-log4j.xml
deleted file mode 100644
index 024c950..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-client-log4j.xml
+++ /dev/null
@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<!-- This is a special config file for properties used to monitor status of the service -->
-<configuration supports_adding_forbidden="true">
-  <!-- log4j.xml -->
-  <property>
-     <name>infra_client_log_maxfilesize</name>
-      <value>80</value>
-    <description>The maximum size of backup file before the log is rotated</description>
-    <display-name>Ambari-Infra Solr Client Log: backup file size</display-name>
-    <value-attributes>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-   </property>
-   <property>
-    <name>infra_client_log_maxbackupindex</name>
-    <value>60</value>
-    <description>The number of backup files</description>
-    <display-name>Ambari-Infra Solr Client Log: # of backup files</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_client_log_dir</name>
-    <value>/var/log/ambari-infra-solr-client</value>
-    <description>Directory for Solr client logs</description>
-    <display-name>Infra Solr Client log dir</display-name>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>log4j template</display-name>
-    <description>This is the jinja template for log4j.properties file for infra solr client</description>
-    <value/>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>solr-client-log4j.properties.j2</property-file-name>
-      <property-file-type>text</property-file-type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-env.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-env.xml
deleted file mode 100644
index afe17a2..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-env.xml
+++ /dev/null
@@ -1,334 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<!-- This is a special config file for properties used to monitor status of the service -->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>infra_solr_port</name>
-    <value>8886</value>
-    <description>Solr port</description>
-    <display-name>Infra Solr port</display-name>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_jmx_port</name>
-    <value>18886</value>
-    <description>Solr JMX port</description>
-    <display-name>Infra Solr JMX port</display-name>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_pid_dir</name>
-    <value>/var/run/ambari-infra-solr</value>
-    <description>Solr Process ID Directory</description>
-    <display-name>Infra Solr pid dir</display-name>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_log_dir</name>
-    <value>/var/log/ambari-infra-solr</value>
-    <description>Directory for Solr logs</description>
-    <display-name>Infra Solr log dir</display-name>
-    <value-attributes>
-      <type>directory</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_user</name>
-    <value>infra-solr</value>
-    <property-type>USER</property-type>
-    <description>Solr user</description>
-    <display-name>Infra Solr User</display-name>
-    <value-attributes>
-      <type>user</type>
-      <overridable>false</overridable>
-      <user-groups>
-        <property>
-          <type>cluster-env</type>
-          <name>user_group</name>
-        </property>
-      </user-groups>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_datadir</name>
-    <value>/var/lib/ambari-infra-solr/data</value>
-    <display-name>Infra Solr data dir</display-name>
-    <description>Directory for storting Solr index. Make sure you have enough disk space</description>
-    <value-attributes>
-      <type>directory</type>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_ssl_enabled</name>
-    <value>false</value>
-    <display-name>Enable SSL to Infra Solr</display-name>
-    <description>Enable ssl to Solr</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_truststore_location</name>
-    <value>/etc/security/serverKeys/infra.solr.trustStore.jks</value>
-    <display-name>Infra Solr trust store location</display-name>
-    <description>Location of the trust store file. (default value is not generated)</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_truststore_type</name>
-    <value>jks</value>
-    <display-name>Infra Solr trust store type</display-name>
-    <description>Type of the trust store file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_truststore_password</name>
-    <value>bigdata</value>
-    <property-type>PASSWORD</property-type>
-    <display-name>Infra Solr trust store password</display-name>
-    <description>Password to open the trust store file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_keystore_location</name>
-    <value>/etc/security/serverKeys/infra.solr.keyStore.jks</value>
-    <display-name>Infra Solr key store location</display-name>
-    <description>Location of the key store file. (default value is not generated)</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_keystore_type</name>
-    <value>jks</value>
-    <display-name>Infra Solr key store type</display-name>
-    <description>Type of the key store file.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_keystore_password</name>
-    <value>bigdata</value>
-    <display-name>Infra Solr key store password</display-name>
-    <property-type>PASSWORD</property-type>
-    <description>Password to open the key store file.</description>
-    <value-attributes>
-      <type>password</type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_znode</name>
-    <value>/infra-solr</value>
-    <description>Zookeeper znode, e.g: /ambari-solr</description>
-    <display-name>Infra Solr ZNode</display-name>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_minmem</name>
-    <value>1024</value>
-    <display-name>Infra Solr Minimum Heap Size</display-name>
-    <description>Solr minimum heap size e.g. 512m</description>
-    <value-attributes>
-      <type>int</type>
-      <minimum>512</minimum>
-      <maximum>32768</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_maxmem</name>
-    <value>2048</value>
-    <display-name>Infra Solr Maximum Heap Size</display-name>
-    <description>Solr maximum heap size e.g. 512m</description>
-    <value-attributes>
-      <type>int</type>
-      <minimum>512</minimum>
-      <maximum>32768</maximum>
-      <unit>MB</unit>
-      <increment-step>256</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-  <property>
-    <name>infra_solr_java_stack_size</name>
-    <value>1</value>
-    <display-name>Infra Solr Java Stack Size</display-name>
-    <description>Java Stack Size of Infra Solr (-Xss) in MB.</description>
-    <value-attributes>
-      <type>int</type>
-      <minimum>1</minimum>
-      <maximum>128</maximum>
-      <unit>MB</unit>
-      <increment-step>1</increment-step>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_jmx_enabled</name>
-    <value>false</value>
-    <display-name>Enable JMX</display-name>
-    <description>Set to true to activate the JMX RMI connector to allow remote JMX client applications to monitor the JVM hosting Solr
-    </description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_kerberos_keytab</name>
-    <value>/etc/security/keytabs/infra_solr.service.keytab</value>
-    <display-name>Infra Solr keytab</display-name>
-    <description>The path to the Kerberos Keytab file containing service principal of the Infra Solr.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>infra_solr_kerberos_principal</name>
-    <value>infra-solr</value>
-    <display-name>Infra Solr principal</display-name>
-    <description>The service principal for Infra Solr.</description>
-    <property-type>KERBEROS_PRINCIPAL</property-type>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>infra_solr_web_kerberos_keytab</name>
-    <value>/etc/security/keytabs/spnego.service.keytab</value>
-    <display-name>Infra Solr Http keytab</display-name>
-    <description>The path to the Kerberos Keytab file containing service principal of the Infra Solr.</description>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>infra_solr_web_kerberos_principal</name>
-    <value>HTTP/_HOST@EXAMPLE.COM</value>
-    <display-name>Infra Solr Http principal</display-name>
-    <description>The service principal for the Infra Solr.</description>
-    <property-type>KERBEROS_PRINCIPAL</property-type>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>infra_solr_zookeeper_quorum</name>
-    <value>{zookeeper_quorum}</value>
-    <display-name>Infra Solr Znode</display-name>
-    <description>Placeholder for Infra Solr Zookeeper connection string. (Use the cluster one by default, you can override this with a custom one if ZK needs to be external)</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <property>
-    <name>infra_solr_zookeeper_external_principal</name>
-    <value>zookeeper/_HOST@EXAMPLE.COM</value>
-    <display-name>External ZK principal</display-name>
-    <description>The kerberos service principal name for external ZooKeeper.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <property>
-    <name>infra_solr_zookeeper_external_enabled</name>
-    <value>false</value>
-    <display-name>Enable External ZK</display-name>
-    <description>Enable external ZooKeeper. If the Solr is secure, the external ZK should be secure as well.</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <property>
-    <name>infra_solr_kerberos_name_rules</name>
-    <value>DEFAULT</value>
-    <display-name>Infra Solr Kerberos name rules</display-name>
-    <description>Kerberos name rules for Spnego</description>
-    <value-attributes>
-      <overridable>false</overridable>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-
-  <property>
-    <name>infra_solr_user_nofile_limit</name>
-    <value>128000</value>
-    <description>Max open files limit setting for infra-solr user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_user_nproc_limit</name>
-    <value>65536</value>
-    <description>Max number of processes limit setting for infra-solr user.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <property>
-    <name>infra_solr_extra_java_opts</name>
-    <value></value>
-    <display-name>Infra Solr extra java options</display-name>
-    <description>Extra Solr java options (e.g.: -Dproperty=value), that will be added to SOLR_OPTS environment variable</description>
-    <value-attributes>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <property>
-    <name>infra_solr_gc_log_opts</name>
-    <value>-verbose:gc -XX:+PrintHeapAtGC -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=15 -XX:GCLogFileSize=200M</value>
-    <display-name>Infra Solr GC log options</display-name>
-    <description>Infra Solr GC log options</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_gc_tune</name>
-    <value>-XX:NewRatio=3 -XX:SurvivorRatio=4 -XX:TargetSurvivorRatio=90 -XX:MaxTenuringThreshold=8 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:ConcGCThreads=4 -XX:ParallelGCThreads=4 -XX:+CMSScavengeBeforeRemark -XX:PretenureSizeThreshold=64m -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=50 -XX:CMSMaxAbortablePrecleanTime=6000 -XX:+CMSParallelRemarkEnabled -XX:+ParallelRefProcEnabled</value>
-    <display-name>Infra Solr GC Tune</display-name>
-    <description>Infra Solr GC Tune</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-
-  <!-- infra-solr-env.sh -->
-  <property>
-    <name>content</name>
-    <display-name>infra-solr-env template</display-name>
-    <description>This is the jinja template for infra-solr-env.sh file</description>
-    <value/>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>infra-solr-env.sh.j2</property-file-name>
-      <property-file-type>text</property-file-type>
-    </value-attributes>
-    <on-ambari-upgrade add="false" update="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-log4j.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-log4j.xml
deleted file mode 100644
index e797b37..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-log4j.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>infra_log_maxfilesize</name>
-    <value>10</value>
-    <description>The maximum size of backup file before the log is rotated</description>
-    <display-name>Ambari Infra Log: backup file size</display-name>
-    <value-attributes>
-      <unit>MB</unit>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_log_maxbackupindex</name>
-    <value>9</value>
-    <description>The number of backup files</description>
-    <display-name>Ambari Infra Log: # of backup files</display-name>
-    <value-attributes>
-      <type>int</type>
-      <minimum>0</minimum>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>infra-solr-log4j template</display-name>
-    <description>This is the jinja template for log4j.properties</description>
-    <value/>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>solr-log4j.properties.j2</property-file-name>
-      <property-file-type>text</property-file-type>
-    </value-attributes>
-    <on-ambari-upgrade add="false"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-security-json.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-security-json.xml
deleted file mode 100644
index 73b74e1..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-security-json.xml
+++ /dev/null
@@ -1,153 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<!-- This is a special config file for properties used to monitor status of the service -->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>infra_solr_ranger_audit_service_users</name>
-    <display-name>Ranger audit service users</display-name>
-    <value>{default_ranger_audit_users}</value>
-    <description>
-      List of comma separated kerberos service users who can write into ranger audit collections if the cluster is
-      secure. (atlas and rangeradmin supported by default)
-      Change values in that case of custom values are used for kerberos principals. (default_ranger_audit_users is
-      resolved ranger-*-audit/xasecure.audit.jaas.Client.option.principal,
-      by default namenode, hbase, hive knox, kafka, ranger kms and nifi are supported, to change it you can edit the
-      security content,
-      or add a new username next to the default value, e.g.: {default_ranger_audit_users},customuser)
-    </description>
-    <depends-on>
-      <property>
-        <type>ranger-hdfs-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>ranger-hbase-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>ranger-hive-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>ranger-knox-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>ranger-kafka-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>ranger-kms-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>ranger-storm-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>ranger-yarn-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>ranger-nifi-audit</type>
-        <name>xasecure.audit.jaas.Client.option.principal</name>
-      </property>
-      <property>
-        <type>application-properties</type>
-        <name>atlas.authentication.principal</name>
-      </property>
-      <property>
-        <type>ranger-admin-site</type>
-        <name>ranger.admin.kerberos.principal</name>
-      </property>
-    </depends-on>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_role_ranger_admin</name>
-    <display-name>Ranger admin role</display-name>
-    <value>ranger_admin_user</value>
-    <description>Ranger admin role, it allows users to create collection, and perform any action on ranger audit collection.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_role_ranger_audit</name>
-    <display-name>Ranger audit role</display-name>
-    <value>ranger_audit_user</value>
-    <description>Ranger audit role, it allows users to perform any action on ranger audit collection.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_role_atlas</name>
-    <display-name>Atlas role</display-name>
-    <value>atlas_user</value>
-    <description>Atlas role, it allows users to create collection, and perform any action on atlas collections.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_role_logsearch</name>
-    <display-name>Log Search role</display-name>
-    <value>logsearch_user</value>
-    <description>Log Search role, it allows users to create collection, and perform any action on Log Search collections.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_role_logfeeder</name>
-    <display-name>Log Feeder role</display-name>
-    <value>logfeeder_user</value>
-    <description>Log Feeder role, it allows users to perform any action on Log Search collections.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_role_dev</name>
-    <display-name>Dev role</display-name>
-    <value>dev</value>
-    <description>Dev role, it allows to perform any read action on any collection.</description>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>infra_solr_security_manually_managed</name>
-    <value>false</value>
-    <display-name>Manually Managed</display-name>
-    <description>Manage /security.json manually (Service start wont override /security.json)</description>
-    <value-attributes>
-      <type>boolean</type>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-  <property>
-    <name>content</name>
-    <display-name>Custom security.json template</display-name>
-    <description>
-      This is the jinja template for custom security.json file on the solr znode
-      (only used if the cluster is secure and this property overrides the security.json which generated during solr
-      start).
-    </description>
-    <value/>
-    <value-attributes>
-      <type>content</type>
-      <show-property-name>false</show-property-name>
-      <empty-value-valid>true</empty-value-valid>
-    </value-attributes>
-    <on-ambari-upgrade add="true"/>
-  </property>
-</configuration>
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-xml.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-xml.xml
deleted file mode 100644
index a7c8b3e..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/configuration/infra-solr-xml.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration supports_adding_forbidden="true">
-  <property>
-    <name>content</name>
-    <display-name>infra-solr-xml template</display-name>
-    <description>This is the jinja template for Ambari Infrastructure solr.xml file</description>
-    <value/>
-    <property-type>VALUE_FROM_PROPERTY_FILE</property-type>
-    <value-attributes>
-      <property-file-name>solr.xml.j2</property-file-name>
-      <property-file-type>xml</property-file-type>
-    </value-attributes>
-    <on-ambari-upgrade add="false" update="true"/>
-  </property>
-</configuration>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/kerberos.json b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/kerberos.json
deleted file mode 100644
index cebf479..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/kerberos.json
+++ /dev/null
@@ -1,53 +0,0 @@
-{
-  "services": [
-    {
-      "name": "AMBARI_INFRA_SOLR",
-      "identities": [
-        {
-          "name": "ambari_infra_smokeuser",
-          "reference": "/smokeuser"
-        },
-        {
-          "name": "ambari_infra_spnego",
-          "reference": "/spnego",
-          "principal": {
-            "configuration": "infra-solr-env/infra_solr_web_kerberos_principal"
-          },
-          "keytab": {
-            "configuration": "infra-solr-env/infra_solr_web_kerberos_keytab"
-          }
-        }
-      ],
-      "components": [
-        {
-          "name": "INFRA_SOLR",
-          "identities": [
-            {
-              "name": "infra-solr",
-              "principal": {
-                "value": "infra-solr/_HOST@${realm}",
-                "type": "service",
-                "configuration": "infra-solr-env/infra_solr_kerberos_principal"
-              },
-              "keytab": {
-                "file": "${keytab_dir}/ambari-infra-solr.service.keytab",
-                "owner": {
-                  "name": "${infra-solr-env/infra_solr_user}",
-                  "access": "r"
-                },
-                "group": {
-                  "name": "${cluster-env/user_group}",
-                  "access": ""
-                },
-                "configuration": "infra-solr-env/infra_solr_kerberos_keytab"
-              }
-            }
-          ]
-        },
-        {
-          "name": "INFRA_SOLR_CLIENT"
-        }
-      ]
-    }
-  ]
-}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/metainfo.xml
deleted file mode 100644
index cd22ecc..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/metainfo.xml
+++ /dev/null
@@ -1,207 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>AMBARI_INFRA_SOLR</name>
-      <displayName>Infra Solr</displayName>
-      <comment>Core shared service used by Ambari managed components.</comment>
-      <version>0.1.0</version>
-      <components>
-        <component>
-          <name>INFRA_SOLR</name>
-          <timelineAppid>infra-solr</timelineAppid>
-          <displayName>Infra Solr Instance</displayName>
-          <category>MASTER</category>
-          <cardinality>1+</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <commandScript>
-            <script>scripts/infra_solr.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>1800</timeout>
-          </commandScript>
-          <logs>
-            <log>
-              <logId>infra_solr</logId>
-              <primary>true</primary>
-            </log>
-          </logs>
-          <customCommands>
-            <customCommand>
-              <name>BACKUP</name>
-              <hidden>true</hidden>
-              <commandScript>
-                <script>scripts/infra_solr.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>1200</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>RESTORE</name>
-              <hidden>true</hidden>
-              <commandScript>
-                <script>scripts/infra_solr.py</script>
-                <scriptType>PYTHON</scriptType>
-                <background>true</background>
-                <timeout>36000</timeout>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>MIGRATE</name>
-              <hidden>true</hidden>
-              <commandScript>
-                <script>scripts/infra_solr.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>36000</timeout>
-                <background>true</background>
-              </commandScript>
-            </customCommand>
-            <customCommand>
-              <name>UPGRADE_SOLR_INSTANCE</name>
-              <hidden>true</hidden>
-              <commandScript>
-                <script>scripts/infra_solr.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>1200</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-          <dependencies>
-            <dependency>
-              <name>AMBARI_INFRA_SOLR/INFRA_SOLR_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-
-          </dependencies>
-          <configuration-dependencies>
-            <config-type>infra-solr-env</config-type>
-            <config-type>infra-solr-xml</config-type>
-            <config-type>infra-solr-log4j</config-type>
-            <config-type>infra-solr-security-json</config-type>
-          </configuration-dependencies>
-        </component>
-
-        <component>
-          <name>INFRA_SOLR_CLIENT</name>
-          <displayName>Infra Solr Client</displayName>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <versionAdvertised>false</versionAdvertised>
-          <commandScript>
-            <script>scripts/infra_solr_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>UPGRADE_SOLR_CLIENT</name>
-              <hidden>true</hidden>
-              <commandScript>
-                <script>scripts/infra_solr_client.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>3600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-          <configFiles>
-            <configFile>
-              <type>env</type>
-              <fileName>log4j.properties</fileName>
-              <dictionaryName>infra-solr-client-log4j</dictionaryName>
-            </configFile>
-          </configFiles>
-          <configuration-dependencies>
-            <config-type>infra-solr-client-log4j</config-type>
-          </configuration-dependencies>
-        </component>
-
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>redhat7,amazonlinux2,redhat6,suse11,suse12</osFamily>
-          <packages>
-            <package>
-              <name>ambari-infra-solr-client</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>ambari-infra-solr</name>
-              <condition>should_install_infra_solr</condition>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>debian7,debian9,ubuntu12,ubuntu14,ubuntu16</osFamily>
-          <packages>
-            <package>
-              <name>ambari-infra-solr-client</name>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-            <package>
-              <name>ambari-infra-solr</name>
-              <condition>should_install_infra_solr</condition>
-              <skipUpgrade>true</skipUpgrade>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <themes>
-        <theme>
-          <fileName>theme.json</fileName>
-          <default>true</default>
-        </theme>
-        <theme>
-          <fileName>directories.json</fileName>
-          <default>true</default>
-        </theme>
-      </themes>
-
-      <quickLinksConfigurations>
-        <quickLinksConfiguration>
-          <fileName>quicklinks.json</fileName>
-          <default>true</default>
-        </quickLinksConfiguration>
-      </quickLinksConfigurations>
-
-    </service>
-  </services>
-</metainfo>
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/collection.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/collection.py
deleted file mode 100644
index e16a1bc..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/collection.py
+++ /dev/null
@@ -1,295 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-import time
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Directory, Execute, File
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions import solr_cloud_util
-from resource_management.libraries.resources.properties_file import PropertiesFile
-
-def backup_collection(env):
-  """
-  Backup collections using replication API (as Solr Cloud Backup API is not available in Solr 5)
-  If the cluster is not kerberized, it will be needed to resolve ip addresses to hostnames (as SOLR_HOST=`hostname -f` is not used by default in infra-solr-env)
-  """
-  import params, command_commons
-  env.set_params(command_commons)
-
-  Directory(command_commons.index_location,
-            mode=0755,
-            cd_access='a',
-            create_parents=True,
-            owner=params.infra_solr_user,
-            group=params.user_group
-            )
-
-  Logger.info(format("Backup Solr Collection {collection} to {index_location}"))
-
-  host_core_map = command_commons.solr_backup_host_cores_map
-
-  host_or_ip = params.hostname
-  # IP resolve - for unsecure cluster
-  host_ip_pairs = {}
-  if not params.security_enabled:
-    keys = host_core_map.keys()
-    for key in keys:
-      if command_commons.is_ip(key):
-        resolved_hostname = command_commons.resolve_ip_to_hostname(key)
-        host_ip_pairs[resolved_hostname] = key
-
-  if params.hostname in host_ip_pairs:
-    host_or_ip = host_ip_pairs[params.hostname]
-
-  cores = host_core_map[host_or_ip] if host_or_ip in host_core_map else []
-
-  for core in cores:
-    if core in command_commons.skip_cores:
-      Logger.info(format("Core '{core}' is filtered out."))
-      continue
-    solr_request_path = format("{core}/replication?command=BACKUP&location={index_location}&name={core}&wt=json")
-    backup_api_cmd = command_commons.create_solr_api_request_command(solr_request_path)
-
-    Execute(backup_api_cmd, user=params.infra_solr_user, logoutput=True)
-
-    if command_commons.request_async is False:
-      Logger.info("Sleep 5 seconds to wait until the backup request is executed.")
-      time.sleep(5)
-      Logger.info("Check backup status ...")
-      solr_status_request_path = format("{core}/replication?command=details&wt=json")
-      status_check_json_output = format("{index_location}/backup_status.json")
-      status_check_cmd = command_commons.create_solr_api_request_command(solr_status_request_path,
-                                                                         status_check_json_output)
-      command_commons.snapshot_status_check(status_check_cmd, status_check_json_output, core, True,
-                                            log_output=command_commons.log_output, tries=command_commons.request_tries,
-                                            time_interval=command_commons.request_time_interval)
-
-
-def restore_collection(env):
-  """
-  Restore collections - by copying snapshots with backup_* prefix, then remove old one and remove backup_* prefixes from the folder names.
-  """
-  import params, command_commons
-  env.set_params(command_commons)
-
-  if command_commons.solr_num_shards == 0:
-    raise Exception(format("The 'solr_shards' command parameter is required to set."))
-
-  if not command_commons.solr_restore_config_set:
-    raise Exception(format("The 'solr_restore_config_set' command parameter is required to set."))
-
-  Logger.info("Original core / host map: " + str(command_commons.solr_backup_core_host_map))
-  Logger.info("New core / host map: " + str(command_commons.solr_restore_core_host_map))
-
-  original_core_host_pairs = command_commons.sort_core_host_pairs(command_commons.solr_backup_core_host_map)
-  new_core_host_pairs = command_commons.sort_core_host_pairs(command_commons.solr_restore_core_host_map)
-
-  core_pairs = command_commons.create_core_pairs(original_core_host_pairs, new_core_host_pairs)
-  Logger.info("Generated core pairs: " + str(core_pairs))
-
-  Logger.info(format("Remove write.lock files from folder '{index_location}'"))
-  for write_lock_file in command_commons.get_files_by_pattern(format("{index_location}"), 'write.lock'):
-    File(write_lock_file, action="delete")
-
-  Logger.info(format("Restore Solr Collection {collection} from {index_location} ..."))
-
-  if command_commons.collection in ["ranger_audits", "history", "hadoop_logs", "audit_logs",
-                                    "vertex_index", "edge_index",
-                                    "fulltext_index"]:  # Make sure ambari wont delete an important collection
-    raise Exception(format(
-      "Selected collection for restore is: {collection}. It is not recommended to restore on default collections."))
-
-  hdfs_cores_on_host=[]
-
-  for core_pair in core_pairs:
-    src_core = core_pair['src_core']
-    target_core = core_pair['target_core']
-
-    if src_core in command_commons.skip_cores:
-      Logger.info(format("Core '{src_core}' (src) is filtered out."))
-      continue
-    elif target_core in command_commons.skip_cores:
-      Logger.info(format("Core '{target_core}' (target) is filtered out."))
-      continue
-
-    core_data = command_commons.solr_restore_core_data
-    only_if_cmd = format("test -d {index_location}/snapshot.{src_core}")
-    core_root_dir = format("{solr_datadir}/backup_{target_core}")
-    core_root_without_backup_dir = format("{solr_datadir}/{target_core}")
-
-    if command_commons.solr_hdfs_path:
-      Directory([core_root_dir],
-                mode=0755,
-                cd_access='a',
-                create_parents=True,
-                owner=params.infra_solr_user,
-                group=params.user_group,
-                only_if=only_if_cmd
-                )
-    else:
-      Directory([format("{core_root_dir}/data/index"),
-                 format("{core_root_dir}/data/tlog"),
-                 format("{core_root_dir}/data/snapshot_metadata")],
-                mode=0755,
-                cd_access='a',
-                create_parents=True,
-                owner=params.infra_solr_user,
-                group=params.user_group,
-                only_if=only_if_cmd
-                )
-
-    core_details = core_data[target_core]['properties']
-    core_properties = {}
-    core_properties['numShards'] = core_details['numShards']
-    core_properties['collection.configName'] = command_commons.solr_restore_config_set
-    core_properties['name'] = target_core
-    core_properties['replicaType'] = core_details['replicaType']
-    core_properties['collection'] = command_commons.collection
-    if command_commons.solr_hdfs_path:
-      core_properties['coreNodeName'] = 'backup_' + core_details['coreNodeName']
-    else:
-      core_properties['coreNodeName'] = core_details['coreNodeName']
-    core_properties['shard'] = core_details['shard']
-    if command_commons.solr_hdfs_path:
-      hdfs_solr_node_folder=command_commons.solr_hdfs_path + format("/backup_{collection}/") + core_details['coreNodeName']
-      source_folder=format("{index_location}/snapshot.{src_core}/")
-      if command_commons.check_folder_exists(source_folder):
-        hdfs_cores_on_host.append(target_core)
-        command_commons.HdfsResource(format("{hdfs_solr_node_folder}/data/index/"),
-                                   type="directory",
-                                   action="create_on_execute",
-                                   source=source_folder,
-                                   owner=params.infra_solr_user,
-                                   mode=0755,
-                                   recursive_chown=True,
-                                   recursive_chmod=True
-                                   )
-        command_commons.HdfsResource(format("{hdfs_solr_node_folder}/data/tlog"),
-                                   type="directory",
-                                   action="create_on_execute",
-                                   owner=params.infra_solr_user,
-                                   mode=0755
-                                   )
-        command_commons.HdfsResource(format("{hdfs_solr_node_folder}/data/snapshot_metadata"),
-                                   type="directory",
-                                   action="create_on_execute",
-                                   owner=params.infra_solr_user,
-                                   mode=0755
-                                   )
-    else:
-      copy_cmd = format("cp -r {index_location}/snapshot.{src_core}/* {core_root_dir}/data/index/") if command_commons.solr_keep_backup \
-        else format("mv {index_location}/snapshot.{src_core}/* {core_root_dir}/data/index/")
-      Execute(
-        copy_cmd, only_if=only_if_cmd,
-        user=params.infra_solr_user,
-        logoutput=True
-      )
-
-    PropertiesFile(
-      core_root_dir + '/core.properties',
-      properties=core_properties,
-      owner=params.infra_solr_user,
-      group=params.user_group,
-      mode=0644,
-      only_if=only_if_cmd
-    )
-
-  Execute(format("rm -rf {solr_datadir}/{collection}*"),
-          user=params.infra_solr_user,
-          logoutput=True)
-  for core_pair in core_pairs:
-    src_core = core_pair['src_core']
-    src_host = core_pair['src_host']
-    target_core = core_pair['target_core']
-
-    if src_core in command_commons.skip_cores:
-      Logger.info(format("Core '{src_core}' (src) is filtered out."))
-      continue
-    elif target_core in command_commons.skip_cores:
-      Logger.info(format("Core '{target_core}' (target) is filtered out."))
-      continue
-
-    if os.path.exists(format("{index_location}/snapshot.{src_core}")):
-      data_to_save = {}
-      host_core_data=command_commons.solr_restore_core_data
-      core_details=host_core_data[target_core]['properties']
-      core_node=core_details['coreNodeName']
-      data_to_save['core']=target_core
-      data_to_save['core_node']=core_node
-      data_to_save['old_host']=core_pair['target_host']
-      data_to_save['new_host']=src_host
-      if command_commons.solr_hdfs_path:
-        data_to_save['new_core_node']="backup_" + core_node
-      else:
-        data_to_save['new_core_node']=core_node
-
-      command_commons.write_core_file(target_core, data_to_save)
-      jaas_file = params.infra_solr_jaas_file if params.security_enabled else None
-      core_json_location = format("{index_location}/{target_core}.json")
-      znode_json_location = format("/restore_metadata/{collection}/{target_core}.json")
-      solr_cloud_util.copy_solr_znode_from_local(params.zookeeper_quorum, params.infra_solr_znode, params.java64_home, jaas_file, core_json_location, znode_json_location)
-
-    core_root_dir = format("{solr_datadir}/backup_{target_core}")
-    core_root_without_backup_dir = format("{solr_datadir}/{target_core}")
-
-    if command_commons.solr_hdfs_path:
-      if target_core in hdfs_cores_on_host:
-
-        Logger.info(format("Core data '{target_core}' is located on this host, processing..."))
-        host_core_data=command_commons.solr_restore_core_data
-        core_details=host_core_data[target_core]['properties']
-
-        core_node=core_details['coreNodeName']
-        collection_core_dir=command_commons.solr_hdfs_path + format("/{collection}/{core_node}")
-        backup_collection_core_dir=command_commons.solr_hdfs_path + format("/backup_{collection}/{core_node}")
-        command_commons.HdfsResource(collection_core_dir,
-                               type="directory",
-                               action="delete_on_execute",
-                               owner=params.infra_solr_user
-                               )
-        if command_commons.check_hdfs_folder_exists(backup_collection_core_dir):
-          collection_backup_core_dir=command_commons.solr_hdfs_path + format("/{collection}/backup_{core_node}")
-          command_commons.move_hdfs_folder(backup_collection_core_dir, collection_backup_core_dir)
-      else:
-        Logger.info(format("Core data '{target_core}' is not located on this host, skipping..."))
-
-    Execute(
-      format("mv {core_root_dir} {core_root_without_backup_dir}"),
-      user=params.infra_solr_user,
-      logoutput=True,
-      only_if=format("test -d {core_root_dir}")
-    )
-
-    Directory(
-      [format("{core_root_without_backup_dir}")],
-      mode=0755,
-      cd_access='a',
-      create_parents=True,
-      owner=params.infra_solr_user,
-      group=params.user_group,
-      recursive_ownership=True,
-      only_if=format("test -d {core_root_without_backup_dir}")
-    )
-
-    if command_commons.solr_hdfs_path and not command_commons.solr_keep_backup:
-      only_if_cmd = format("test -d {index_location}/snapshot.{src_core}")
-      Directory(format("{index_location}/snapshot.{src_core}"),
-            action="delete",
-            only_if=only_if_cmd,
-            owner=params.infra_solr_user)
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/command_commons.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/command_commons.py
deleted file mode 100644
index 311fc0a..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/command_commons.py
+++ /dev/null
@@ -1,354 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import fnmatch
-import json
-import os
-import params
-import socket
-import time
-import traceback
-
-from resource_management.core.shell import call
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute, File
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-
-index_helper_script = '/usr/lib/ambari-infra-solr-client/solrIndexHelper.sh'
-
-# folder location which contains the snapshot/core folder
-index_location = default("/commandParams/solr_index_location", None)
-
-# index version (available index versions: 6.6.2 and 7.3.1, second one is used by default)
-index_version = default("/commandParams/solr_index_version", '6.6.2')
-
-# if this flag is false, skip upgrade if the version is proper, you can force to re-run the tool with setting the flag to true
-force = default("/commandParams/solr_index_upgrade_force", False)
-
-# if this flag is true, then it will generate specific folder for every backup with a hostname suffix
-# where "." chars replaced with "_"(e.g.: /my/path/backup_locationc7301_ambari_apache_org), that can be useful if different
-# hosts share the same filesystem where the backup is stored.
-shared_fs = default("/commandParams/solr_shared_fs", False)
-
-# set verbose log for index migration (default: true)
-debug = default("/commandParams/solr_migrate_debug", True)
-
-# used for filtering folders in backup location (like: if the filter is ranger, that will include snapshot.ranger folder but won't include snapshot.hadoop_logs)
-core_filter = default("/commandParams/solr_core_filter", None)
-
-# used to filer out comma separated cores - can be useful if backup/resotre failed in some point
-skip_cores = default("/commandParams/solr_skip_cores", "").split(",")
-
-# delete write.lock file at the start of lucene index migration process
-delete_lock_on_start = default("/commandParams/solr_delete_lock_on_start", True)
-# if it used, then core filter will be used with snapshot.* folder pattern
-backup_mode = default("/commandParams/solr_migrate_backup", True)
-
-log_output = default("/commandParams/solr_migrate_logoutput", True)
-# Solr colleection name (used for DELETE/BACKUP/RESTORE)
-collection = default("/commandParams/solr_collection", "ranger_audits")
-# it will be used in the snapshot name, if it's ranger, the snapshot folder will be snapshot.ranger
-backup_name = default("/commandParams/solr_backup_name", "ranger")
-
-request_async = default("/commandParams/solr_request_async", False)
-request_tries = int(default("/commandParams/solr_request_tries", 30))
-request_time_interval = int(default("/commandParams/solr_request_time_interval", 5))
-
-check_hosts_default = True if params.security_enabled else False
-check_hosts = default("/commandParams/solr_check_hosts", check_hosts_default)
-
-solr_protocol = "https" if params.infra_solr_ssl_enabled else "http"
-solr_port = format("{params.infra_solr_port}")
-solr_base_url = format("{solr_protocol}://{params.hostname}:{params.infra_solr_port}/solr")
-solr_datadir = params.infra_solr_datadir
-
-solr_keep_backup=default("/commandParams/solr_keep_backup", False)
-
-solr_num_shards = int(default("/commandParams/solr_shards", "0"))
-
-solr_hdfs_path=default("/commandParams/solr_hdfs_path", None)
-
-solr_backup_host_cores_map = json.loads(default("/commandParams/solr_backup_host_cores_map", "{}"))
-solr_backup_core_host_map = json.loads(default("/commandParams/solr_backup_core_host_map", "{}"))
-solr_restore_host_cores_map = json.loads(default("/commandParams/solr_restore_host_cores_map", "{}"))
-solr_restore_core_host_map = json.loads(default("/commandParams/solr_restore_core_host_map", "{}"))
-solr_restore_core_data = json.loads(default("/commandParams/solr_restore_core_data", "{}"))
-solr_restore_config_set = default("/commandParams/solr_restore_config_set", None)
-
-keytab = None
-principal = None
-if params.security_enabled:
-  keytab = params.infra_solr_kerberos_keytab
-  principal = params.infra_solr_kerberos_principal
-
-if solr_hdfs_path:
-
-  import functools
-  from resource_management.libraries.functions import conf_select
-  from resource_management.libraries.functions import stack_select
-  from resource_management.libraries.functions import get_klist_path
-  from resource_management.libraries.functions import get_kinit_path
-  from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-
-  klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
-  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
-
-  # hadoop default parameters
-  hdfs_user = params.config['configurations']['hadoop-env']['hdfs_user']
-  hadoop_bin = stack_select.get_hadoop_dir("sbin")
-  hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-  hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-  hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-  hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-  hdfs_user_keytab = params.config['configurations']['hadoop-env']['hdfs_user_keytab']
-
-  dfs_type = default("/clusterLevelParams/dfs_type", "")
-
-  hdfs_site = params.config['configurations']['hdfs-site']
-  default_fs = params.config['configurations']['core-site']['fs.defaultFS']
-  #create partial functions with common arguments for every HdfsResource call
-  #to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
-  HdfsResource = functools.partial(
-    HdfsResource,
-    user=params.infra_solr_user,
-    hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-    security_enabled = params.security_enabled,
-    keytab = keytab,
-    kinit_path_local = kinit_path_local,
-    hadoop_bin_dir = hadoop_bin_dir,
-    hadoop_conf_dir = hadoop_conf_dir,
-    principal_name = principal,
-    hdfs_site = hdfs_site,
-    default_fs = default_fs,
-    immutable_paths = get_not_managed_resources(),
-    dfs_type = dfs_type
-  )
-
-hostname_suffix = params.hostname.replace(".", "_")
-
-if shared_fs:
-  index_location = format("{index_location}_{hostname_suffix}")
-
-def get_files_by_pattern(directory, pattern):
-  for root, dirs, files in os.walk(directory):
-    for basename in files:
-      try:
-        matched = pattern.match(basename)
-      except AttributeError:
-        matched = fnmatch.fnmatch(basename, pattern)
-      if matched:
-        yield os.path.join(root, basename)
-
-def create_solr_api_request_command(request_path, output=None, override_solr_base_url=None):
-  solr_url = format("{solr_base_url}/{request_path}") if override_solr_base_url is None else format("{override_solr_base_url}/{request_path}")
-  grep_cmd = " | grep 'solr_rs_status: 200'"
-  api_cmd = format("kinit -kt {keytab} {principal} && curl -w'solr_rs_status: %{{http_code}}' -k --negotiate -u : '{solr_url}'") \
-    if params.security_enabled else format("curl -w'solr_rs_status: %{{http_code}}' -k '{solr_url}'")
-  if output is not None:
-    api_cmd+=format(" -o {output}")
-  api_cmd+=grep_cmd
-  return api_cmd
-
-def snapshot_status_check(request_cmd, json_output, snapshot_name, backup=True, log_output=True, tries=30, time_interval=5):
-  """
-  Check BACKUP/RESTORE status until the response status will be successful or failed.
-
-  :param request_cmd: backup or restore api path
-  :param json_output: json file which will store the response output
-  :param snapshot_name: snapshot name, it will be used to check the proper status in the status response (backup: <snapshot_name>, restore: snapshot.<snapshot_name>)
-  :param backup: this flag is true if the check is against backup, otherwise it will be restore
-  :param log_output: print the output of the downloaded json file (backup/restore response)
-  :param tries: number of tries of the requests - it stops after the response status is successful for backup/restore
-  :param time_interval: time to wait in seconds between retries
-  """
-  failed = True
-  num_tries = 0
-  for i in range(tries):
-    try:
-      num_tries+=1
-      if (num_tries > 1):
-        Logger.info(format("Number of tries: {num_tries} ..."))
-      Execute(request_cmd, user=params.infra_solr_user)
-      with open(json_output) as json_file:
-        json_data = json.load(json_file)
-        if backup:
-          details = json_data['details']
-          if 'backup' in details:
-            backup_list = details['backup']
-            if log_output:
-              Logger.info(str(backup_list))
-
-            if type(backup_list) == type(list()): # support map and list format as well
-              backup_data = dict(backup_list[i:i+2] for i in range(0, len(backup_list), 2))
-            else:
-              backup_data = backup_list
-
-            if (not 'snapshotName' in backup_data) or backup_data['snapshotName'] != snapshot_name:
-              snapshot = backup_data['snapshotName']
-              Logger.info(format("Snapshot name: {snapshot}, wait until {snapshot_name} will be available."))
-              time.sleep(time_interval)
-              continue
-
-            if backup_data['status'] == 'success':
-              Logger.info("Backup command status: success.")
-              failed = False
-            elif backup_data['status'] == 'failed':
-              Logger.info("Backup command status: failed.")
-            else:
-              Logger.info(format("Backup command is in progress... Sleep for {time_interval} seconds."))
-              time.sleep(time_interval)
-              continue
-          else:
-            Logger.info("Backup data is not found yet in details JSON response...")
-            time.sleep(time_interval)
-            continue
-
-        else:
-          if 'restorestatus' in json_data:
-            restorestatus_data = json_data['restorestatus']
-            if log_output:
-              Logger.info(str(restorestatus_data))
-
-            if (not 'snapshotName' in restorestatus_data) or restorestatus_data['snapshotName'] != format("snapshot.{snapshot_name}"):
-              snapshot = restorestatus_data['snapshotName']
-              Logger.info(format("Snapshot name: {snapshot}, wait until snapshot.{snapshot_name} will be available."))
-              time.sleep(time_interval)
-              continue
-
-            if restorestatus_data['status'] == 'success':
-              Logger.info("Restore command successfully finished.")
-              failed = False
-            elif restorestatus_data['status'] == 'failed':
-              Logger.info("Restore command failed.")
-            else:
-              Logger.info(format("Restore command is in progress... Sleep for {time_interval} seconds."))
-              time.sleep(time_interval)
-              continue
-          else:
-            Logger.info("Restore status data is not found yet in details JSON response...")
-            time.sleep(time_interval)
-            continue
-
-
-    except Exception:
-      traceback.print_exc()
-      time.sleep(time_interval)
-      continue
-    break
-
-  if failed:
-    raise Exception("Status Command failed.")
-  else:
-    Logger.info("Status command finished successfully.")
-
-def __get_domain_name(url):
-  spltAr = url.split("://")
-  i = (0,1)[len(spltAr) > 1]
-  dm = spltAr[i].split('/')[0].split(':')[0].lower()
-  return dm
-
-def write_core_file(core, core_data):
-  core_json_location = format("{index_location}/{core}.json")
-  File(core_json_location, content=json.dumps(core_data))
-
-def create_core_pairs(original_cores, new_cores):
-  """
-  Create core pairss from the original and new cores (backups -> restored ones), use alphabetic order
-  """
-  core_pairs_data=[]
-  if len(new_cores) < len(original_cores):
-    raise Exception("Old collection core size is: " + str(len(new_cores)) +
-                    ". You will need at least: " + str(len(original_cores)))
-  else:
-    for index, core_data in enumerate(original_cores):
-      value={}
-      value['src_core']=core_data[0]
-      value['src_host']=core_data[1]
-      value['target_core']=new_cores[index][0]
-      value['target_host']=new_cores[index][1]
-      core_pairs_data.append(value)
-    File(format("{index_location}/restore_core_pairs.json"), content=json.dumps(core_pairs_data))
-    return core_pairs_data
-
-def sort_core_host_pairs(host_core_map):
-  """
-  Sort host core map by key
-  """
-  core_host_pairs=[]
-  for key in sorted(host_core_map):
-    core_host_pairs.append((key, host_core_map[key]))
-  return core_host_pairs
-
-def is_ip(addr):
-  try:
-    socket.inet_aton(addr)
-    return True
-  except socket.error:
-    return False
-
-def resolve_ip_to_hostname(ip):
-  try:
-    host_name = socket.gethostbyaddr(ip)[0].lower()
-    Logger.info(format("Resolved {ip} to {host_name}"))
-    fqdn_name = socket.getaddrinfo(host_name, 0, 0, 0, 0, socket.AI_CANONNAME)[0][3].lower()
-    return host_name if host_name == fqdn_name else fqdn_name
-  except socket.error:
-    pass
-  return ip
-
-def create_command(command):
-  """
-  Create hdfs command. Append kinit to the command if required.
-  """
-  kinit_cmd = "{0} -kt {1} {2};".format(kinit_path_local, params.infra_solr_kerberos_keytab, params.infra_solr_kerberos_principal) if params.security_enabled else ""
-  return kinit_cmd + command
-
-def execute_commad(command):
-  """
-  Run hdfs command by infra-solr user
-  """
-  return call(command, user=params.infra_solr_user, timeout=300)
-
-def move_hdfs_folder(source_dir, target_dir):
-  cmd=create_command(format('hdfs dfs -mv {source_dir} {target_dir}'))
-  returncode, stdout = execute_commad(cmd)
-  if returncode:
-    raise Exception("Unable to move HDFS dir '{0}' to '{1}' (return code: {2})".format(source_dir, target_dir, str(returncode)))
-  return stdout.strip()
-
-def check_hdfs_folder_exists(hdfs_dir):
-  """
-  Check that hdfs folder exists or not
-  """
-  cmd=create_command(format("hdfs dfs -ls {hdfs_dir}"))
-  returncode, stdout = execute_commad(cmd)
-  if returncode:
-    return False
-  return True
-
-def check_folder_exists(dir):
-  """
-  Check that folder exists or not
-  """
-  returncode, stdout = call(format("test -d {dir}"), user=params.infra_solr_user, timeout=300)
-  if returncode:
-    return False
-  return True
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/infra_solr.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/infra_solr.py
deleted file mode 100644
index 2fd4321..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/infra_solr.py
+++ /dev/null
@@ -1,171 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from ambari_commons.repo_manager import ManagerFactory
-from ambari_commons.shell import RepoCallContext
-from resource_management.core.logger import Logger
-from resource_management.core.source import Template
-from resource_management.core.resources.system import Execute, File
-from resource_management.core.resources.zkmigrator import ZkMigrator
-from resource_management.libraries.functions.check_process_status import check_process_status
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.get_user_call_output import get_user_call_output
-from resource_management.libraries.functions.show_logs import show_logs
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions.generate_logfeeder_input_config import generate_logfeeder_input_config
-
-from collection import backup_collection, restore_collection
-from migrate import migrate_index
-from setup_infra_solr import setup_infra_solr, setup_solr_znode_env
-
-class InfraSolr(Script):
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env)
-
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    setup_infra_solr(name = 'server')
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-    generate_logfeeder_input_config('ambari-infra', Template("input.config-ambari-infra.json.j2", extra_imports=[default]))
-
-    setup_solr_znode_env()
-    start_cmd = format('{solr_bindir}/solr start -cloud -noprompt -s {infra_solr_datadir} -Dsolr.kerberos.name.rules=\'{infra_solr_kerberos_name_rules}\' 2>&1') \
-            if params.security_enabled else format('{solr_bindir}/solr start -cloud -noprompt -s {infra_solr_datadir} 2>&1')
-
-    check_process = format("{sudo} test -f {infra_solr_pidfile} && {sudo} pgrep -F {infra_solr_pidfile}")
-
-    piped_start_cmd = format('{start_cmd} | tee {infra_solr_log}') + '; (exit "${PIPESTATUS[0]}")'
-    Execute(
-      piped_start_cmd,
-      environment={'SOLR_INCLUDE': format('{infra_solr_conf}/infra-solr-env.sh')},
-      user=params.infra_solr_user,
-      not_if=check_process,
-      logoutput=True
-    )
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-    try:
-      stop_cmd=format('{solr_bindir}/solr stop -all')
-      piped_stop_cmd=format('{stop_cmd} | tee {infra_solr_log}') + '; (exit "${PIPESTATUS[0]}")'
-      Execute(piped_stop_cmd,
-              environment={'SOLR_INCLUDE': format('{infra_solr_conf}/infra-solr-env.sh')},
-              user=params.infra_solr_user,
-              logoutput=True
-              )
-
-      File(params.prev_infra_solr_pidfile,
-           action="delete"
-           )
-    except:
-      Logger.warning("Could not stop solr:" + str(sys.exc_info()[1]) + "\n Trying to kill it")
-      self.kill_process(params.prev_infra_solr_pidfile, params.infra_solr_user, params.infra_solr_log_dir)
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    check_process_status(status_params.infra_solr_pidfile)
-
-  def kill_process(self, pid_file, user, log_dir):
-    """
-    Kill the process by pid file, then check the process is running or not. If the process is still running after the kill
-    command, it will try to kill with -9 option (hard kill)
-    """
-    pid = get_user_call_output(format("cat {pid_file}"), user=user, is_checked_call=False)[1]
-    process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
-
-    kill_cmd = format("{sudo} kill {pid}")
-    Execute(kill_cmd,
-          not_if=format("! ({process_id_exists_command})"))
-    wait_time = 5
-
-    hard_kill_cmd = format("{sudo} kill -9 {pid}")
-    Execute(hard_kill_cmd,
-          not_if=format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
-          ignore_failures=True)
-    try:
-      Execute(format("! ({process_id_exists_command})"),
-            tries=20,
-            try_sleep=3,
-            )
-    except:
-      show_logs(log_dir, user)
-      raise
-
-    File(pid_file,
-       action="delete"
-       )
-
-  def disable_security(self, env):
-    import params
-    if not params.infra_solr_znode:
-      Logger.info("Skipping reverting ACL")
-      return
-    zkmigrator = ZkMigrator(
-      zk_host=params.zk_quorum,
-      java_exec=params.java_exec,
-      java_home=params.java64_home,
-      jaas_file=params.infra_solr_jaas_file,
-      user=params.infra_solr_user)
-    zkmigrator.set_acls(params.infra_solr_znode, 'world:anyone:crdwa')
-
-  def backup(self, env):
-    backup_collection(env)
-
-  def restore(self, env):
-    restore_collection(env)
-
-  def migrate(self, env):
-    migrate_index(env)
-
-  def upgrade_solr_instance(self, env):
-    pkg_provider = ManagerFactory.get()
-    context = RepoCallContext()
-    context.log_output = True
-    pkg_provider.remove_package('ambari-infra-solr', context, ignore_dependencies=True)
-    pkg_provider.upgrade_package('ambari-infra-solr', context)
-
-  def get_log_folder(self):
-    import params
-    return params.infra_solr_log_dir
-
-  def get_user(self):
-    import params
-    return params.infra_solr_user
-
-  def get_pid_files(self):
-    import status_params
-    return [status_params.infra_solr_pidfile]
-
-if __name__ == "__main__":
-  InfraSolr().execute()
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/infra_solr_client.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/infra_solr_client.py
deleted file mode 100644
index 2ff997f..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/infra_solr_client.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.repo_manager import ManagerFactory
-from ambari_commons.shell import RepoCallContext
-from resource_management.core.exceptions import ClientComponentHasNoStatus
-from resource_management.libraries.script.script import Script
-
-from setup_infra_solr import setup_infra_solr
-
-class InfraSolrClient(Script):
-
-  def install(self, env):
-    import params
-    env.set_params(params)
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    setup_infra_solr(name ='client')
-
-  def start(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-  def stop(self, env, upgrade_type=None):
-    import params
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def upgrade_solr_client(self, env):
-    pkg_provider = ManagerFactory.get()
-    context = RepoCallContext()
-    context.log_output = True
-    pkg_provider.remove_package('ambari-infra-solr-client', context, ignore_dependencies=True)
-    pkg_provider.upgrade_package('ambari-infra-solr-client', context)
-
-if __name__ == "__main__":
-  InfraSolrClient().execute()
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/migrate.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/migrate.py
deleted file mode 100644
index 3947ddc..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/migrate.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute, File
-from resource_management.libraries.functions.format import format
-
-def migrate_index(env):
-  """
-  Migrate lucene index in the background.
-  """
-  import params, command_commons
-  env.set_params(command_commons)
-
-  index_migrate_cmd = format("{index_helper_script} upgrade-index -d {index_location} -v {index_version}")
-
-  if command_commons.force is True:
-    index_migrate_cmd+=" -f"
-
-  if command_commons.backup_mode is True:
-    index_migrate_cmd+=" -b"
-
-  if command_commons.debug is True:
-    index_migrate_cmd+=" -g"
-
-  if command_commons.core_filter is not None:
-    index_migrate_cmd+=format(" -c {core_filter}")
-
-  deleted_write_locks=[]
-  if command_commons.delete_lock_on_start:
-    Logger.info(format("Remove write.lock files from folder '{index_location}'"))
-    for write_lock_file in command_commons.get_files_by_pattern(format("{index_location}"), 'write.lock'):
-      File(write_lock_file, action="delete")
-      deleted_write_locks.append(write_lock_file)
-  else:
-    Logger.info("Skip removing write.lock files")
-
-  Logger.info(format("Migrate index at location: {index_location}"))
-  # It can generate a write.lock file
-  Execute(index_migrate_cmd, user=params.infra_solr_user, environment={'JAVA_HOME': params.java64_home}, logoutput=command_commons.log_output)
-
-  if command_commons.delete_lock_on_start:
-    for write_lock_file in deleted_write_locks:
-      Logger.info(format("Put '{write_lock_file}' file back"))
-      File(write_lock_file, action="create", mode = 0644, owner=params.infra_solr_user, group=params.user_group, not_if=format("test -f {write_lock_file}"))
-
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/params.py
deleted file mode 100644
index 6bb6c1c..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/params.py
+++ /dev/null
@@ -1,219 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.script.script import Script
-
-import status_params
-
-
-def get_port_from_url(address):
-  if not is_empty(address):
-    return address.split(':')[-1]
-  else:
-    return address
-
-def get_name_from_principal(principal):
-  if not principal:  # return if empty
-    return principal
-  slash_split = principal.split('/')
-  if len(slash_split) == 2:
-    return slash_split[0]
-  else:
-    at_split = principal.split('@')
-    return at_split[0]
-
-# config object that holds the configurations declared in the -site.xml file
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-stack_version = default("/commandParams/version", None)
-sudo = AMBARI_SUDO_BINARY
-security_enabled = status_params.security_enabled
-
-hostname = config['agentLevelParams']['hostname'].lower()
-
-infra_solr_conf = "/etc/ambari-infra-solr/conf"
-
-infra_solr_port = status_params.infra_solr_port
-infra_solr_piddir = status_params.infra_solr_piddir
-infra_solr_pidfile = status_params.infra_solr_pidfile
-prev_infra_solr_pidfile = status_params.prev_infra_solr_pidfile
-
-user_group = config['configurations']['cluster-env']['user_group']
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
-
-limits_conf_dir = "/etc/security/limits.d"
-infra_solr_user_nofile_limit = default("/configurations/infra-solr-env/infra_solr_user_nofile_limit", "128000")
-infra_solr_user_nproc_limit = default("/configurations/infra-solr-env/infra_solr_user_nproc_limit", "65536")
-
-# shared configs
-java_home = config['ambariLevelParams']['java_home']
-ambari_java_home = default("/ambariLevelParams/ambari_java_home", None)
-java64_home = ambari_java_home if ambari_java_home is not None else java_home
-java_exec = format("{java64_home}/bin/java")
-zookeeper_hosts_list = config['clusterHostInfo']['zookeeper_server_hosts']
-zookeeper_hosts_list.sort()
-# get comma separated list of zookeeper hosts from clusterHostInfo
-zookeeper_hosts = ",".join(zookeeper_hosts_list)
-
-#####################################
-# Solr configs
-#####################################
-
-# Only supporting SolrCloud mode - so hardcode those options
-solr_cloudmode = 'true'
-solr_dir = '/usr/lib/ambari-infra-solr'
-solr_client_dir = '/usr/lib/ambari-infra-solr-client'
-solr_bindir = solr_dir + '/bin'
-cloud_scripts = solr_dir + '/server/scripts/cloud-scripts'
-
-logsearch_hosts = default("/clusterHostInfo/logsearch_server_hosts", [])
-has_logsearch = len(logsearch_hosts) > 0
-
-zookeeper_port = default('/configurations/zoo.cfg/clientPort', None)
-# get comma separated list of zookeeper hosts from clusterHostInfo
-index = 0
-zookeeper_quorum = ""
-for host in config['clusterHostInfo']['zookeeper_server_hosts']:
-  zookeeper_quorum += host + ":" + str(zookeeper_port)
-  index += 1
-  if index < len(config['clusterHostInfo']['zookeeper_server_hosts']):
-    zookeeper_quorum += ","
-
-if "infra-solr-env" in config['configurations']:
-  infra_solr_hosts = config['clusterHostInfo']['infra_solr_hosts']
-  infra_solr_znode = config['configurations']['infra-solr-env']['infra_solr_znode']
-  infra_solr_min_mem = format(config['configurations']['infra-solr-env']['infra_solr_minmem'])
-  infra_solr_max_mem = format(config['configurations']['infra-solr-env']['infra_solr_maxmem'])
-  infra_solr_java_stack_size = format(config['configurations']['infra-solr-env']['infra_solr_java_stack_size'])
-  infra_solr_instance_count = len(config['clusterHostInfo']['infra_solr_hosts'])
-  infra_solr_datadir = format(config['configurations']['infra-solr-env']['infra_solr_datadir'])
-  infra_solr_data_resources_dir = os.path.join(infra_solr_datadir, 'resources')
-  infra_solr_jmx_port = config['configurations']['infra-solr-env']['infra_solr_jmx_port']
-  infra_solr_ssl_enabled = default('configurations/infra-solr-env/infra_solr_ssl_enabled', False)
-  infra_solr_keystore_location = config['configurations']['infra-solr-env']['infra_solr_keystore_location']
-  infra_solr_keystore_password = config['configurations']['infra-solr-env']['infra_solr_keystore_password']
-  infra_solr_keystore_type = config['configurations']['infra-solr-env']['infra_solr_keystore_type']
-  infra_solr_truststore_location = config['configurations']['infra-solr-env']['infra_solr_truststore_location']
-  infra_solr_truststore_password = config['configurations']['infra-solr-env']['infra_solr_truststore_password']
-  infra_solr_truststore_type = config['configurations']['infra-solr-env']['infra_solr_truststore_type']
-  infra_solr_user = config['configurations']['infra-solr-env']['infra_solr_user']
-  infra_solr_log_dir = config['configurations']['infra-solr-env']['infra_solr_log_dir']
-  infra_solr_log = format("{infra_solr_log_dir}/solr-install.log")
-  solr_env_content = config['configurations']['infra-solr-env']['content']
-  infra_solr_gc_log_opts = format(config['configurations']['infra-solr-env']['infra_solr_gc_log_opts'])
-  infra_solr_gc_tune = format(config['configurations']['infra-solr-env']['infra_solr_gc_tune'])
-  infra_solr_extra_java_opts = format(default('configurations/infra-solr-env/infra_solr_extra_java_opts', ""))
-
-  zk_quorum = format(default('configurations/infra-solr-env/infra_solr_zookeeper_quorum', zookeeper_quorum))
-
-if 'infra-solr-security-json' in config['configurations']:
-  infra_solr_security_manually_managed = default("/configurations/infra-solr-security-json/infra_solr_security_manually_managed", False)
-
-default_ranger_audit_users = 'nn,hbase,hive,knox,kafka,kms,storm,yarn,nifi'
-
-if security_enabled:
-  kinit_path_local = status_params.kinit_path_local
-  _hostname_lowercase = config['agentLevelParams']['hostname'].lower()
-  infra_solr_jaas_file = infra_solr_conf + '/infra_solr_jaas.conf'
-  infra_solr_kerberos_keytab = config['configurations']['infra-solr-env']['infra_solr_kerberos_keytab']
-  infra_solr_kerberos_principal = config['configurations']['infra-solr-env']['infra_solr_kerberos_principal'].replace('_HOST',_hostname_lowercase)
-  infra_solr_web_kerberos_keytab = config['configurations']['infra-solr-env']['infra_solr_web_kerberos_keytab']
-  infra_solr_web_kerberos_principal = config['configurations']['infra-solr-env']['infra_solr_web_kerberos_principal'].replace('_HOST',_hostname_lowercase)
-  infra_solr_kerberos_name_rules = config['configurations']['infra-solr-env']['infra_solr_kerberos_name_rules']
-  infra_solr_sasl_user = get_name_from_principal(infra_solr_kerberos_principal)
-  kerberos_realm = config['configurations']['kerberos-env']['realm']
-
-  zookeeper_principal_name = default("/configurations/zookeeper-env/zookeeper_principal_name", "zookeeper/_HOST@EXAMPLE.COM")
-  external_zk_principal_enabled = default("/configurations/infra-solr-env/infra_solr_zookeeper_external_enabled", False)
-  external_zk_principal_name = default("/configurations/infra-solr-env/infra_solr_zookeeper_external_principal", "zookeeper/_HOST@EXAMPLE.COM")
-  zk_principal_name = external_zk_principal_name if external_zk_principal_enabled else zookeeper_principal_name
-  zk_principal_user = zk_principal_name.split('/')[0]
-  zk_security_opts = format('-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username={zk_principal_user} -Dzookeeper.sasl.clientconfig=Client')
-
-  ranger_audit_principal_conf_key = "xasecure.audit.jaas.Client.option.principal"
-  ranger_audit_principals = []
-  ranger_audit_principals.append(default('configurations/ranger-hdfs-audit/' + ranger_audit_principal_conf_key, 'nn'))
-  ranger_audit_principals.append(default('configurations/ranger-hbase-audit/' + ranger_audit_principal_conf_key, 'hbase'))
-  ranger_audit_principals.append(default('configurations/ranger-hive-audit/' + ranger_audit_principal_conf_key, 'hive'))
-  ranger_audit_principals.append(default('configurations/ranger-knox-audit/' + ranger_audit_principal_conf_key, 'knox'))
-  ranger_audit_principals.append(default('configurations/ranger-kafka-audit/' + ranger_audit_principal_conf_key, 'kafka'))
-  ranger_audit_principals.append(default('configurations/ranger-kms-audit/' + ranger_audit_principal_conf_key, 'rangerkms'))
-  ranger_audit_principals.append(default('configurations/ranger-storm-audit/' + ranger_audit_principal_conf_key, 'storm'))
-  ranger_audit_principals.append(default('configurations/ranger-yarn-audit/' + ranger_audit_principal_conf_key, 'yarn'))
-  ranger_audit_principals.append(default('configurations/ranger-nifi-audit/' + ranger_audit_principal_conf_key, 'nifi'))
-  ranger_audit_names_from_principals = [ get_name_from_principal(x) for x in ranger_audit_principals ]
-  default_ranger_audit_users = ','.join(ranger_audit_names_from_principals)
-
-  infra_solr_logsearch_service_users = []
-  logsearch_kerberos_service_user = get_name_from_principal(default('/configurations/logsearch-env/logsearch_kerberos_principal', 'logsearch'))
-  infra_solr_logsearch_service_users.append(logsearch_kerberos_service_user)
-  logsearch_kerberos_service_users_str = str(default('/configurations/logsearch-env/logsearch_kerberos_service_users', ''))
-  if logsearch_kerberos_service_users_str and logsearch_kerberos_service_users_str.strip():
-    logsearch_kerberos_service_users = logsearch_kerberos_service_users_str.split(',')
-    infra_solr_logsearch_service_users.extend(logsearch_kerberos_service_users)
-
-infra_solr_ranger_audit_service_users = format(config['configurations']['infra-solr-security-json']['infra_solr_ranger_audit_service_users']).split(',')
-infra_solr_security_json_content = config['configurations']['infra-solr-security-json']['content']
-
-infra_solr_jmx_enabled = str(default('/configurations/infra-solr-env/infra_solr_jmx_enabled', False)).lower()
-
-#Solr log4j
-infra_log_maxfilesize = default('configurations/infra-solr-log4j/infra_log_maxfilesize',10)
-infra_log_maxbackupindex = default('configurations/infra-solr-log4j/infra_log_maxbackupindex',9)
-
-solr_xml_content = default('configurations/infra-solr-xml/content', None)
-solr_log4j_content = default('configurations/infra-solr-log4j/content', None)
-
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
-
-ranger_solr_collection_name = default('configurations/ranger-env/ranger_solr_collection_name', 'ranger_audits')
-logsearch_service_logs_collection = default('configurations/logsearch-properties/logsearch.solr.collection.service.logs', 'hadoop_logs')
-logsearch_audit_logs_collection = default('configurations/logsearch-properties/logsearch.solr.collection.audit.logs', 'audit_logs')
-
-ranger_admin_kerberos_service_user = get_name_from_principal(default('configurations/ranger-admin-site/ranger.admin.kerberos.principal', 'rangeradmin'))
-atlas_kerberos_service_user = get_name_from_principal(default('configurations/application-properties/atlas.authentication.principal', 'atlas'))
-logfeeder_kerberos_service_user = get_name_from_principal(default('configurations/logfeeder-env/logfeeder_kerberos_principal', 'logfeeder'))
-infra_solr_kerberos_service_user = get_name_from_principal(default('configurations/infra-solr-env/infra_solr_kerberos_principal', 'infra-solr'))
-
-infra_solr_role_ranger_admin = default('configurations/infra-solr-security-json/infra_solr_role_ranger_admin', 'ranger_user')
-infra_solr_role_ranger_audit = default('configurations/infra-solr-security-json/infra_solr_role_ranger_audit', 'ranger_audit_user')
-infra_solr_role_atlas = default('configurations/infra-solr-security-json/infra_solr_role_atlas', 'atlas_user')
-infra_solr_role_logsearch = default('configurations/infra-solr-security-json/infra_solr_role_logsearch', 'logsearch_user')
-infra_solr_role_logfeeder = default('configurations/infra-solr-security-json/infra_solr_role_logfeeder', 'logfeeder_user')
-infra_solr_role_dev = default('configurations/infra-solr-security-json/infra_solr_role_dev', 'dev')
-
-ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
-metrics_enabled = ams_collector_hosts != ''
-if metrics_enabled:
-  metrics_http_policy = config['configurations']['ams-site']['timeline.metrics.service.http.policy']
-  ams_collector_protocol = 'http'
-  if metrics_http_policy == 'HTTPS_ONLY':
-    ams_collector_protocol = 'https'
-  ams_collector_port = str(get_port_from_url(config['configurations']['ams-site']['timeline.metrics.service.webapp.address']))
-else:
-  ams_collector_port = ''
-  ams_collector_protocol = ''
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/service_check.py
deleted file mode 100644
index f98379c3..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/service_check.py
+++ /dev/null
@@ -1,48 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Execute
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.script.script import Script
-
-class InfraServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    Logger.info('Infra Service Check ...')
-    if "infra-solr-env" in params.config['configurations'] \
-      and params.infra_solr_hosts is not None \
-      and len(params.infra_solr_hosts) > 0:
-      solr_protocol = "https" if params.infra_solr_ssl_enabled else "http"
-      solr_host = params.infra_solr_hosts[0] # choose the first solr host
-      solr_port = params.infra_solr_port
-      solr_url = format("{solr_protocol}://{solr_host}:{solr_port}/solr/#/")
-
-      smokeuser_kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};") if params.security_enabled else ""
-      smoke_infra_solr_cmd = format("{smokeuser_kinit_cmd} curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {solr_url} | grep 200")
-      Execute(smoke_infra_solr_cmd,
-              tries = 40,
-              try_sleep=3,
-              user=params.smokeuser,
-              logoutput=True)
-
-if __name__ == "__main__":
-  InfraServiceCheck().execute()
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/setup_infra_solr.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/setup_infra_solr.py
deleted file mode 100644
index b6055ea..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/setup_infra_solr.py
+++ /dev/null
@@ -1,163 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management.core.exceptions import Fail
-from resource_management.core.resources.system import Directory, File
-from resource_management.core.source import InlineTemplate, Template
-from resource_management.libraries.functions import solr_cloud_util
-from resource_management.libraries.functions.decorator import retry
-from resource_management.libraries.functions.format import format
-
-def setup_infra_solr(name = None):
-  import params
-
-  if name == 'server':
-    Directory([params.infra_solr_log_dir, params.infra_solr_piddir,
-               params.infra_solr_datadir, params.infra_solr_data_resources_dir],
-              mode=0755,
-              cd_access='a',
-              create_parents=True,
-              owner=params.infra_solr_user,
-              group=params.user_group
-              )
-
-    Directory([params.solr_dir, params.infra_solr_conf],
-              mode=0755,
-              cd_access='a',
-              owner=params.infra_solr_user,
-              group=params.user_group,
-              create_parents=True,
-              recursive_ownership=True
-              )
-
-    File(params.infra_solr_log,
-         mode=0644,
-         owner=params.infra_solr_user,
-         group=params.user_group,
-         content=''
-         )
-
-    File(format("{infra_solr_conf}/infra-solr-env.sh"),
-         content=InlineTemplate(params.solr_env_content),
-         mode=0755,
-         owner=params.infra_solr_user,
-         group=params.user_group
-         )
-
-    File(format("{infra_solr_datadir}/solr.xml"),
-         content=InlineTemplate(params.solr_xml_content),
-         owner=params.infra_solr_user,
-         group=params.user_group
-         )
-
-    File(format("{infra_solr_conf}/log4j.properties"),
-         content=InlineTemplate(params.solr_log4j_content),
-         owner=params.infra_solr_user,
-         group=params.user_group
-         )
-
-    custom_security_json_location = format("{infra_solr_conf}/custom-security.json")
-    File(custom_security_json_location,
-         content=InlineTemplate(params.infra_solr_security_json_content),
-         owner=params.infra_solr_user,
-         group=params.user_group,
-         mode=0640
-         )
-
-    if params.security_enabled:
-      File(format("{infra_solr_jaas_file}"),
-           content=Template("infra_solr_jaas.conf.j2"),
-           owner=params.infra_solr_user)
-
-      File(format("{infra_solr_conf}/security.json"),
-           content=Template("infra-solr-security.json.j2"),
-           owner=params.infra_solr_user,
-           group=params.user_group,
-           mode=0640)
-    if os.path.exists(params.limits_conf_dir):
-      File(os.path.join(params.limits_conf_dir, 'infra-solr.conf'),
-           owner='root',
-           group='root',
-           mode=0644,
-           content=Template("infra-solr.conf.j2")
-      )
-
-  elif name == 'client':
-    solr_cloud_util.setup_solr_client(params.config)
-
-  else :
-    raise Fail('Nor client or server were selected to install.')
-
-def setup_solr_znode_env():
-  """
-  Setup SSL, ACL and authentication / authorization related Zookeeper settings for Solr (checkout: /clustersprops.json and /security.json)
-  """
-  import params
-
-  custom_security_json_location = format("{infra_solr_conf}/custom-security.json")
-  jaas_file = params.infra_solr_jaas_file if params.security_enabled else None
-  java_opts = params.zk_security_opts if params.security_enabled else None
-  url_scheme = 'https' if params.infra_solr_ssl_enabled else 'http'
-
-  security_json_file_location = custom_security_json_location \
-    if params.infra_solr_security_json_content and str(params.infra_solr_security_json_content).strip() \
-    else format("{infra_solr_conf}/security.json") # security.json file to upload
-
-  create_ambari_solr_znode(java_opts, jaas_file)
-
-  solr_cloud_util.set_cluster_prop(
-    zookeeper_quorum=params.zk_quorum,
-    solr_znode=params.infra_solr_znode,
-    java64_home=params.java64_home,
-    prop_name="urlScheme",
-    prop_value=url_scheme,
-    jaas_file=jaas_file,
-    java_opts=java_opts
-  )
-  if not params.infra_solr_security_manually_managed:
-    solr_cloud_util.setup_kerberos_plugin(
-      zookeeper_quorum=params.zk_quorum,
-      solr_znode=params.infra_solr_znode,
-      jaas_file=jaas_file,
-      java64_home=params.java64_home,
-      secure=params.security_enabled,
-      security_json_location=security_json_file_location,
-      java_opts=java_opts
-    )
-
-  if params.security_enabled:
-    solr_cloud_util.secure_solr_znode(
-      zookeeper_quorum=params.zk_quorum,
-      solr_znode=params.infra_solr_znode,
-      jaas_file=jaas_file,
-      java64_home=params.java64_home,
-      sasl_users_str=params.infra_solr_sasl_user,
-      java_opts=java_opts
-    )
-
-
-@retry(times=30, sleep_time=5, err_class=Fail)
-def create_ambari_solr_znode(java_opts, jaas_file):
-  import params
-  solr_cloud_util.create_znode(
-    zookeeper_quorum=params.zk_quorum,
-    solr_znode=params.infra_solr_znode,
-    java64_home=params.java64_home,
-    retry=30, interval=5, java_opts=java_opts, jaas_file=jaas_file)
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/status_params.py
deleted file mode 100644
index 3c1ed37..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/scripts/status_params.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from os import listdir, path
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.script.script import Script
-
-config = Script.get_config()
-
-infra_solr_port = default('configurations/infra-solr-env/infra_solr_port', '8886')
-infra_solr_piddir = default('configurations/infra-solr-env/infra_solr_pid_dir', '/var/run/ambari-infra-solr')
-infra_solr_pidfile = format("{infra_solr_piddir}/solr-{infra_solr_port}.pid")
-
-prev_infra_solr_pidfile = ''
-if path.isdir(infra_solr_piddir):
-  for file in listdir(infra_solr_piddir):
-    prev_infra_solr_pidfile = infra_solr_piddir + '/' + file
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra-solr-security.json.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra-solr-security.json.j2
deleted file mode 100644
index db502f3..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra-solr-security.json.j2
+++ /dev/null
@@ -1,84 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-{
-  "authentication": {
-    "class": "org.apache.solr.security.KerberosPlugin"
-  },
-  "authorization": {
-    "class": "org.apache.solr.security.InfraRuleBasedAuthorizationPlugin",
-    "user-role": {
-      "{{infra_solr_kerberos_service_user}}@{{kerberos_realm}}": "admin",
-{% if infra_solr_logsearch_service_users %}
-{%   for logsearch_kerberos_service_user in infra_solr_logsearch_service_users %}
-      "{{logsearch_kerberos_service_user}}@{{kerberos_realm}}": ["{{infra_solr_role_logsearch}}", "{{infra_solr_role_ranger_admin}}", "{{infra_solr_role_dev}}"],
-{%   endfor %}
-{% endif %}
-      "{{logfeeder_kerberos_service_user}}@{{kerberos_realm}}": ["{{infra_solr_role_logfeeder}}", "{{infra_solr_role_dev}}"],
-      "{{atlas_kerberos_service_user}}@{{kerberos_realm}}": ["{{infra_solr_role_atlas}}", "{{infra_solr_role_ranger_audit}}", "{{infra_solr_role_dev}}"],
-{% if infra_solr_ranger_audit_service_users %}
-{%   for ranger_audit_service_user in infra_solr_ranger_audit_service_users %}
-      "{{ranger_audit_service_user}}@{{kerberos_realm}}": ["{{infra_solr_role_ranger_audit}}", "{{infra_solr_role_dev}}"],
-{%   endfor %}
-{% endif %}
-      "{{ranger_admin_kerberos_service_user}}@{{kerberos_realm}}": ["{{infra_solr_role_ranger_admin}}", "{{infra_solr_role_ranger_audit}}", "{{infra_solr_role_dev}}"]
-    },
-    "permissions": [
-    {
-      "name" : "collection-admin-read",
-      "role" :null
-    },
-    {
-      "name" : "collection-admin-edit",
-      "role" : ["admin", "{{infra_solr_role_logsearch}}", "{{infra_solr_role_logfeeder}}", "{{infra_solr_role_atlas}}", "{{infra_solr_role_ranger_admin}}"]
-    },
-    {
-      "name":"read",
-      "role": "{{infra_solr_role_dev}}"
-    },
-    {
-      "collection": ["{{logsearch_service_logs_collection}}", "{{logsearch_audit_logs_collection}}", "history"],
-      "role": ["admin", "{{infra_solr_role_logsearch}}", "{{infra_solr_role_logfeeder}}"],
-      "name": "logsearch-manager",
-      "path": "/*"
-    },
-    {
-       "collection": ["vertex_index", "edge_index", "fulltext_index"],
-       "role": ["admin", "{{infra_solr_role_atlas}}"],
-       "name": "atlas-manager",
-       "path": "/*"
-    },
-    {
-       "collection": "{{ranger_solr_collection_name}}",
-       "role": ["admin", "{{infra_solr_role_ranger_admin}}", "{{infra_solr_role_ranger_audit}}"],
-       "name": "ranger-manager",
-       "path": "/*"
-    },
-    {
-       "collection": "old_ranger_audits",
-       "role": ["admin", "{{infra_solr_role_ranger_admin}}", "{{infra_solr_role_ranger_audit}}"],
-       "name": "backup-ranger-manager",
-       "path": "/*"
-    },
-    {
-       "collection": ["old_vertex_index", "old_edge_index", "old_fulltext_index"],
-       "role": ["admin", "{{infra_solr_role_atlas}}"],
-       "name": "backup-atlas-manager",
-       "path": "/*"
-    }]
-  }
-}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra-solr.conf.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra-solr.conf.j2
deleted file mode 100644
index 2e385c0..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra-solr.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{infra_solr_user}}   - nofile {{infra_solr_user_nofile_limit}}
-{{infra_solr_user}}   - nproc  {{infra_solr_user_nproc_limit}}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra_solr_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra_solr_jaas.conf.j2
deleted file mode 100644
index 8f8d711..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/infra_solr_jaas.conf.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-Client {
- com.sun.security.auth.module.Krb5LoginModule required
- useKeyTab=true
- storeKey=true
- useTicketCache=false
- keyTab="{{infra_solr_kerberos_keytab}}"
- principal="{{infra_solr_kerberos_principal}}";
-};
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/input.config-ambari-infra.json.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/input.config-ambari-infra.json.j2
deleted file mode 100644
index af530e7..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/package/templates/input.config-ambari-infra.json.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-{#
- # Licensed to the Apache Software Foundation (ASF) under one
- # or more contributor license agreements.  See the NOTICE file
- # distributed with this work for additional information
- # regarding copyright ownership.  The ASF licenses this file
- # to you under the Apache License, Version 2.0 (the
- # "License"); you may not use this file except in compliance
- # with the License.  You may obtain a copy of the License at
- #
- #   http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- #}
-{
-  "input":[
-    {
-      "type":"infra_solr",
-      "rowtype":"service",
-      "path":"{{default('/configurations/infra-solr-env/infra_solr_log_dir', '/var/log/ambari-infra-solr')}}/solr.log"
-    }
-  ],
-  "filter":[
-    {
-      "filter":"grok",
-      "conditions":{
-        "fields":{
-          "type":[
-            "infra_solr"
-          ]
-        }
-      },
-      "log4j_format":"",
-      "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})",
-      "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}\\[%{DATA:thread_name}\\]%{SPACE}%{LOGLEVEL:level}%{SPACE}%{GREEDYDATA:log_message}",
-      "post_map_values":{
-        "logtime":{
-          "map_date":{
-            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
-          }
-        }
-      }
-    }
-  ]
-}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/infra-solr-env.sh.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/infra-solr-env.sh.j2
deleted file mode 100644
index 0ca8522..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/infra-solr-env.sh.j2
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/bash
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# By default the script will use JAVA_HOME to determine which java
-# to use, but you can set a specific path for Solr to use without
-# affecting other Java applications on your server/workstation.
-SOLR_JAVA_HOME={{java64_home}}
-
-# Increase Java Min/Max Heap as needed to support your indexing / query needs
-SOLR_JAVA_MEM="-Xms{{infra_solr_min_mem}}m -Xmx{{infra_solr_max_mem}}m"
-
-SOLR_JAVA_STACK_SIZE="-Xss{{infra_solr_java_stack_size}}m"
-
-GC_LOG_OPTS="{{infra_solr_gc_log_opts}} -Xloggc:{{infra_solr_log_dir}}/solr_gc.log"
-
-GC_TUNE="{{infra_solr_gc_tune}}"
-
-# Set the ZooKeeper connection string if using an external ZooKeeper ensemble
-# e.g. host1:2181,host2:2181/chroot
-# Leave empty if not using SolrCloud
-ZK_HOST="{{zookeeper_quorum}}{{infra_solr_znode}}"
-
-# Set the ZooKeeper client timeout (for SolrCloud mode)
-ZK_CLIENT_TIMEOUT="60000"
-
-# By default the start script uses "localhost"; override the hostname here
-# for production SolrCloud environments to control the hostname exposed to cluster state
-SOLR_HOST=`hostname -f`
-
-# By default the start script uses UTC; override the timezone if needed
-#SOLR_TIMEZONE="UTC"
-
-# Set to true to activate the JMX RMI connector to allow remote JMX client applications
-# to monitor the JVM hosting Solr; set to "false" to disable that behavior
-# (false is recommended in production environments)
-ENABLE_REMOTE_JMX_OPTS="{{infra_solr_jmx_enabled}}"
-
-# The script will use SOLR_PORT+10000 for the RMI_PORT or you can set it here
-RMI_PORT={{infra_solr_jmx_port}}
-
-# Anything you add to the SOLR_OPTS variable will be included in the java
-# start command line as-is, in ADDITION to other options. If you specify the
-# -a option on start script, those options will be appended as well. Examples:
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.autoSoftCommit.maxTime=3000"
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.autoCommit.maxTime=60000"
-#SOLR_OPTS="$SOLR_OPTS -Dsolr.clustering.enabled=true"
-SOLR_OPTS="$SOLR_OPTS -Djava.rmi.server.hostname={{hostname}}"
-{% if infra_solr_extra_java_opts -%}
-SOLR_OPTS="$SOLR_OPTS {{infra_solr_extra_java_opts}}"
-{% endif %}
-
-# Location where the bin/solr script will save PID files for running instances
-# If not set, the script will create PID files in $SOLR_TIP/bin
-SOLR_PID_DIR={{infra_solr_piddir}}
-
-# Path to a directory where Solr creates index files, the specified directory
-# must contain a solr.xml; by default, Solr will use server/solr
-SOLR_HOME={{infra_solr_datadir}}
-
-# Solr provides a default Log4J configuration properties file in server/resources
-# however, you may want to customize the log settings and file appender location
-# so you can point the script to use a different log4j.properties file
-LOG4J_PROPS={{infra_solr_conf}}/log4j.properties
-
-# Location where Solr should write logs to; should agree with the file appender
-# settings in server/resources/log4j.properties
-SOLR_LOGS_DIR={{infra_solr_log_dir}}
-
-# Sets the port Solr binds to, default is 8983
-SOLR_PORT={{infra_solr_port}}
-
-# Be sure to update the paths to the correct keystore for your environment
-{% if infra_solr_ssl_enabled %}
-SOLR_SSL_KEY_STORE={{infra_solr_keystore_location}}
-SOLR_SSL_KEY_STORE_PASSWORD={{infra_solr_keystore_password}}
-SOLR_SSL_TRUST_STORE={{infra_solr_truststore_location}}
-SOLR_SSL_TRUST_STORE_PASSWORD={{infra_solr_truststore_password}}
-SOLR_SSL_NEED_CLIENT_AUTH=false
-SOLR_SSL_WANT_CLIENT_AUTH=false
-{% endif %}
-
-# Uncomment to set a specific SSL port (-Djetty.ssl.port=N); if not set
-# and you are using SSL, then the start script will use SOLR_PORT for the SSL port
-#SOLR_SSL_PORT=
-
-{% if security_enabled -%}
-SOLR_JAAS_FILE={{infra_solr_jaas_file}}
-SOLR_KERB_KEYTAB={{infra_solr_web_kerberos_keytab}}
-SOLR_KERB_PRINCIPAL={{infra_solr_web_kerberos_principal}}
-SOLR_OPTS="$SOLR_OPTS -Dsolr.hdfs.security.kerberos.principal={{infra_solr_kerberos_principal}}"
-SOLR_OPTS="$SOLR_OPTS {{zk_security_opts}}"
-
-SOLR_AUTH_TYPE="kerberos"
-SOLR_AUTHENTICATION_OPTS=" -DauthenticationPlugin=org.apache.solr.security.KerberosPlugin -Djava.security.auth.login.config=$SOLR_JAAS_FILE -Dsolr.kerberos.principal=${SOLR_KERB_PRINCIPAL} -Dsolr.kerberos.keytab=${SOLR_KERB_KEYTAB} -Dsolr.kerberos.cookie.domain=${SOLR_HOST}"
-{% endif %}
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-client-log4j.properties.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-client-log4j.properties.j2
deleted file mode 100644
index df77961..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-client-log4j.properties.j2
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-log4j.rootLogger=INFO,file,stdout,stderr
-
-log4j.appender.file=org.apache.log4j.RollingFileAppender
-log4j.appender.file.File={{solr_client_log|default('/var/log/ambari-infra-solr-client/solr-client.log')}}
-log4j.appender.file.MaxFileSize={{solr_client_log_maxfilesize}}MB
-log4j.appender.file.MaxBackupIndex={{solr_client_log_maxbackupindex}}
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%d{DATE} %5p [%t] %c{1}:%L - %m%n
-
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.Threshold=INFO
-log4j.appender.stdout.Target=System.out
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%m%n
-log4j.appender.stdout.filter.lvlRangeFilter=org.apache.log4j.varia.LevelRangeFilter
-log4j.appender.stdout.filter.lvlRangeFilter.LevelMax=WARN
-
-log4j.appender.stderr=org.apache.log4j.ConsoleAppender
-log4j.appender.stderr.Threshold=ERROR
-log4j.appender.stderr.Target=System.err
-log4j.appender.stderr.layout=org.apache.log4j.PatternLayout
-log4j.appender.stderr.layout.ConversionPattern=%m%n
-log4j.appender.stderr.filter.lvlRangeFilter=org.apache.log4j.varia.LevelRangeFilter
-log4j.appender.stderr.filter.lvlRangeFilter.LevelMin=ERROR
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-log4j.properties.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-log4j.properties.j2
deleted file mode 100644
index d81aa17..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr-log4j.properties.j2
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#  Logging level
-infra.solr.log.dir={{infra_solr_log_dir}}
-#log4j.rootLogger=INFO, file, CONSOLE
-log4j.rootLogger=WARN, file
-
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%-4r [%t] %-5p %c %x [%X{collection} %X{shard} %X{replica} %X{core}] \u2013 %m%n
-
-#- size rotation with log cleanup.
-log4j.appender.file=org.apache.log4j.RollingFileAppender
-log4j.appender.file.MaxFileSize={{infra_log_maxfilesize}}MB
-log4j.appender.file.MaxBackupIndex={{infra_log_maxbackupindex}}
-
-#- File to log to and log format
-log4j.appender.file.File=${infra.solr.log.dir}/solr.log
-log4j.appender.file.layout=org.apache.log4j.PatternLayout
-log4j.appender.file.layout.ConversionPattern=%d{ISO8601} [%t] %-5p [%X{collection} %X{shard} %X{replica} %X{core}] %C (%F:%L) - %m%n
-
-log4j.logger.org.apache.zookeeper=WARN
-log4j.logger.org.apache.hadoop=WARN
-
-# set to INFO to enable infostream log messages
-log4j.logger.org.apache.solr.update.LoggingInfoStream=OFF
\ No newline at end of file
diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr.xml.j2 b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr.xml.j2
deleted file mode 100644
index 9c5c52d..0000000
--- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/0.1.0/properties/solr.xml.j2
+++ /dev/null
@@ -1,122 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" ?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-<solr>
-  <solrcloud>
-    <str name="host">${host:}</str>
-    <int name="hostPort">${jetty.port:}</int>
-    <str name="hostContext">${hostContext:solr}</str>
-    <int name="zkClientTimeout">${zkClientTimeout:15000}</int>
-    <bool name="genericCoreNodeNames">${genericCoreNodeNames:true}</bool>
-  </solrcloud>
-{% if metrics_enabled -%}
-  <metrics>
-    <reporter name="ambariInfra" group="jvm" class="org.apache.ambari.infra.solr.metrics.reporters.SimpleAMSReporter">
-      <int name="period">60</int>
-      <str name="amsCollectorHosts">{{ams_collector_hosts}}</str>
-      <int name="amsCollectorPort">{{ams_collector_port}}</int>
-      <str name="amsCollectorProtocol">{{ams_collector_protocol}}</str>
-      <str name="trustStoreLocation">{{infra_solr_truststore_location}}</str>
-      <str name="trustStoreType">{{infra_solr_truststore_type}}</str>
-      <str name="trustStorePassword">{{infra_solr_truststore_password}}</str>
-      <str name="filter">threads.count</str>
-      <str name="filter">threads.deadlock.count</str>
-      <str name="filter">memory.heap.used</str>
-      <str name="filter">memory.heap.max</str>
-      <str name="filter">memory.non-heap.used</str>
-      <str name="filter">memory.non-heap.max</str>
-      <str name="filter">memory.pools.CMS-Old-Gen.used</str>
-      <str name="filter">memory.pools.CMS-Old-Gen.max</str>
-      <str name="filter">gc.ConcurrentMarkSweep.count</str>
-      <str name="filter">gc.ConcurrentMarkSweep.time</str>
-      <str name="filter">gc.ParNew.count</str>
-      <str name="filter">gc.ParNew.time</str>
-      <str name="filter">memory.pools.Metaspace.used</str>
-      <str name="filter">memory.pools.Metaspace.max</str>
-      <str name="filter">memory.pools.Par-Eden-Space.used</str>
-      <str name="filter">memory.pools.Par-Eden-Space.max</str>
-      <str name="filter">memory.pools.Par-Survivor-Space.used</str>
-      <str name="filter">memory.pools.Par-Survivor-Space.max</str>
-      <str name="filter">gc.G1-Old-Generation.count</str>
-      <str name="filter">gc.G1-Old-Generation.time</str>
-      <str name="filter">gc.G1-Young-Generation.count</str>
-      <str name="filter">gc.G1-Young-Generation.time</str>
-      <str name="filter">memory.pools.G1-Eden-Space.used</str>
-      <str name="filter">memory.pools.G1-Eden-Space.max</str>
-      <str name="filter">memory.pools.G1-Survivor-Space.used</str>
... 690416 lines suppressed ...