Posted to commits@ambari.apache.org by sw...@apache.org on 2016/10/20 19:06:02 UTC

[1/3] ambari git commit: Integrate Druid with Ambari (Nishant Bangarwa, Slim Bouguerra via Swapan Shridhar).

Repository: ambari
Updated Branches:
  refs/heads/trunk 563b41def -> 685e926db


http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/test/python/stacks/2.6/configs/default.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/configs/default.json b/ambari-server/src/test/python/stacks/2.6/configs/default.json
new file mode 100644
index 0000000..8b90e48
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/configs/default.json
@@ -0,0 +1,552 @@
+{
+  "roleCommand": "SERVICE_CHECK",
+  "clusterName": "c1",
+  "hostname": "c6401.ambari.apache.org",
+  "hostLevelParams": {
+    "not_managed_hdfs_path_list": "[\"/apps/hive/warehouse\",\"/apps/falcon\",\"/mr-history/done\",\"/app-logs\",\"/tmp\"]",
+    "agent_stack_retry_count": "5",
+    "agent_stack_retry_on_unavailability": "false",
+    "jdk_location": "http://c6401.ambari.apache.org:8080/resources/",
+    "ambari_db_rca_password": "mapred",
+    "ambari_db_rca_url": "jdbc:postgresql://c6401.ambari.apache.org/ambarirca",
+    "jce_name": "UnlimitedJCEPolicyJDK7.zip",
+    "stack_version": "2.5",
+    "stack_name": "HDP",
+    "ambari_db_rca_driver": "org.postgresql.Driver",
+    "jdk_name": "jdk-7u67-linux-x64.tar.gz",
+    "ambari_db_rca_username": "mapred",
+    "java_home": "/usr/jdk64/jdk1.7.0_45",
+    "db_name": "ambari"
+  },
+  "commandType": "EXECUTION_COMMAND",
+  "roleParams": {},
+  "serviceName": "SLIDER",
+  "role": "SLIDER",
+  "commandParams": {
+    "version": "2.5.0.0-1235",
+    "command_timeout": "300",
+    "service_package_folder": "OOZIE",
+    "script_type": "PYTHON",
+    "script": "scripts/service_check.py",
+    "excluded_hosts": "host1,host2"
+  },
+  "taskId": 152,
+  "public_hostname": "c6401.ambari.apache.org",
+  "configurations": {
+    "slider-client": {
+      "slider.yarn.queue": "default"
+    },
+    "sqoop-site": {
+      "atlas.cluster.name": "c1",
+      "sqoop.job.data.publish.class": "org.apache.atlas.sqoop.hook.SqoopHook"
+    },
+    "mahout-env": {
+      "mahout_user": "mahout"
+    },
+    "hbase-env": {
+      "hbase_user": "hbase"
+    },
+    "yarn-env": {
+      "yarn_user": "yarn"
+    },
+    "mahout-log4j": {
+      "content": "\n            #\n            #\n            # Licensed to the Apache Software Foundation (ASF) under one\n            # or more contributor license agreements.  See the NOTICE file\n            # distributed with this work for additional information\n            # regarding copyright ownership.  The ASF licenses this file\n            # to you under the Apache License, Version 2.0 (the\n            # \"License\"); you may not use this file except in compliance\n            # with the License.  You may obtain a copy of the License at\n            #\n            #   http://www.apache.org/licenses/LICENSE-2.0\n            #\n            # Unless required by applicable law or agreed to in writing,\n            # software distributed under the License is distributed on an\n            # \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n            # KIND, either express or implied.  See the License for the\n            # specific language governing permissions a
 nd limitations\n            # under the License.\n            #\n            #\n            #\n\n            # Set everything to be logged to the console\n            log4j.rootCategory=WARN, console\n            log4j.appender.console=org.apache.log4j.ConsoleAppender\n            log4j.appender.console.target=System.err\n            log4j.appender.console.layout=org.apache.log4j.PatternLayout\n            log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n            # Settings to quiet third party logs that are too verbose\n            log4j.logger.org.eclipse.jetty=WARN\n            log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN\n            log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN"
+    },
+    "hadoop-env": {
+      "hdfs_user": "hdfs"
+    },
+    "core-site": {
+      "fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
+    },
+    "hdfs-site": {
+      "a": "b"
+    },
+    "yarn-site": {
+      "yarn.application.classpath": "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+      "yarn.resourcemanager.address": "c6401.ambari.apache.org:8050",
+      "yarn.resourcemanager.scheduler.address": "c6401.ambari.apache.org:8030"
+    },
+    "cluster-env": {
+      "managed_hdfs_resource_property_names": "",
+      "security_enabled": "false",
+      "ignore_groupsusers_create": "false",
+      "smokeuser": "ambari-qa",
+      "kerberos_domain": "EXAMPLE.COM",
+      "user_group": "hadoop"
+    },
+    "webhcat-site": {
+      "templeton.jar": "/usr/hdp/current/hive-webhcat/share/webhcat/svr/lib/hive-webhcat-*.jar",
+      "templeton.pig.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/pig/pig.tar.gz",
+      "templeton.hive.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/hive/hive.tar.gz",
+      "templeton.sqoop.archive": "hdfs:///hdp/apps/{{ hdp_stack_version }}/sqoop/sqoop.tar.gz",
+      "templeton.streaming.jar": "hdfs:///hdp/apps/{{ hdp_stack_version }}/mr/hadoop-streaming.jar"
+    },
+    "slider-log4j": {
+      "content": "log4jproperties\nline2"
+    },
+    "slider-env": {
+      "content": "envproperties\nline2"
+    },
+    "gateway-site": {
+      "java.security.auth.login.config": "/etc/knox/conf/krb5JAASLogin.conf",
+      "gateway.hadoop.kerberos.secured": "false",
+      "gateway.gateway.conf.dir": "deployments",
+      "gateway.path": "gateway",
+      "sun.security.krb5.debug": "true",
+      "java.security.krb5.conf": "/etc/knox/conf/krb5.conf",
+      "gateway.port": "8443"
+    },
+
+    "users-ldif": {
+      "content": "\n            # Licensed to the Apache Software Foundation (ASF) under one\n            # or more contributor license agreements.  See the NOTICE file\n            # distributed with this work for additional information\n            # regarding copyright ownership.  The ASF licenses this file\n            # to you under the Apache License, Version 2.0 (the\n            # \"License\"); you may not use this file except in compliance\n            # with the License.  You may obtain a copy of the License at\n            #\n            #     http://www.apache.org/licenses/LICENSE-2.0\n            #\n            # Unless required by applicable law or agreed to in writing, software\n            # distributed under the License is distributed on an \"AS IS\" BASIS,\n            # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n            # See the License for the specific language governing permissions and\n            # limitations under the Lice
 nse.\n\n            version: 1\n\n            # Please replace with site specific values\n            dn: dc=hadoop,dc=apache,dc=org\n            objectclass: organization\n            objectclass: dcObject\n            o: Hadoop\n            dc: hadoop\n\n            # Entry for a sample people container\n            # Please replace with site specific values\n            dn: ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:organizationalUnit\n            ou: people\n\n            # Entry for a sample end user\n            # Please replace with site specific values\n            dn: uid=guest,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: Guest\n            sn: User\n            uid: guest\n            userPassword:guest-password\n\n            # entry for sample user admin\n            dn: 
 uid=admin,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: Admin\n            sn: Admin\n            uid: admin\n            userPassword:admin-password\n\n            # entry for sample user sam\n            dn: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: sam\n            sn: sam\n            uid: sam\n            userPassword:sam-password\n\n            # entry for sample user tom\n            dn: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:person\n            objectclass:organizationalPerson\n            objectclass:inetOrgPerson\n            cn: tom\n            sn: tom\n            uid: tom\n            userPasswor
 d:tom-password\n\n            # create FIRST Level groups branch\n            dn: ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass:organizationalUnit\n            ou: groups\n            description: generic groups branch\n\n            # create the analyst group under groups\n            dn: cn=analyst,ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass: groupofnames\n            cn: analyst\n            description:analyst  group\n            member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org\n            member: uid=tom,ou=people,dc=hadoop,dc=apache,dc=org\n\n\n            # create the scientist group under groups\n            dn: cn=scientist,ou=groups,dc=hadoop,dc=apache,dc=org\n            objectclass:top\n            objectclass: groupofnames\n            cn: scientist\n            description: scientist group\n            member: uid=sam,ou=people,dc=hadoop,dc=apache,dc=org"
+    },
+
+    "topology": {
+      "content": "\n        <topology>\n\n            <gateway>\n\n                <provider>\n                    <role>authentication</role>\n                    <name>ShiroProvider</name>\n                    <enabled>true</enabled>\n                    <param>\n                        <name>sessionTimeout</name>\n                        <value>30</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm</name>\n                        <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.userDnTemplate</name>\n                        <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>\n                    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.url</name>\n                        <value>ldap://{{knox_host_name}}:33389</value>\n                 
    </param>\n                    <param>\n                        <name>main.ldapRealm.contextFactory.authenticationMechanism</name>\n                        <value>simple</value>\n                    </param>\n                    <param>\n                        <name>urls./**</name>\n                        <value>authcBasic</value>\n                    </param>\n                </provider>\n\n                <provider>\n                    <role>identity-assertion</role>\n                    <name>Default</name>\n                    <enabled>true</enabled>\n                </provider>\n\n            </gateway>\n\n            <service>\n                <role>NAMENODE</role>\n                <url>hdfs://{{namenode_host}}:{{namenode_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>JOBTRACKER</role>\n                <url>rpc://{{rm_host}}:{{jt_rpc_port}}</url>\n            </service>\n\n            <service>\n                <role>WEBHDFS</role
 >\n                <url>http://{{namenode_host}}:{{namenode_http_port}}/webhdfs</url>\n            </service>\n\n            <service>\n                <role>WEBHCAT</role>\n                <url>http://{{webhcat_server_host}}:{{templeton_port}}/templeton</url>\n            </service>\n\n            <service>\n                <role>OOZIE</role>\n                <url>http://{{oozie_server_host}}:{{oozie_server_port}}/oozie</url>\n            </service>\n\n            <service>\n                <role>WEBHBASE</role>\n                <url>http://{{hbase_master_host}}:{{hbase_master_port}}</url>\n            </service>\n\n            <service>\n                <role>HIVE</role>\n                <url>http://{{hive_server_host}}:{{hive_http_port}}/{{hive_http_path}}</url>\n            </service>\n\n            <service>\n                <role>RESOURCEMANAGER</role>\n                <url>http://{{rm_host}}:{{rm_port}}/ws</url>\n            </service>\n        </topology>"
+    },
+
+    "ldap-log4j": {
+      "content": "\n        # Licensed to the Apache Software Foundation (ASF) under one\n        # or more contributor license agreements.  See the NOTICE file\n        # distributed with this work for additional information\n        # regarding copyright ownership.  The ASF licenses this file\n        # to you under the Apache License, Version 2.0 (the\n        # \"License\"); you may not use this file except in compliance\n        # with the License.  You may obtain a copy of the License at\n        #\n        #     http://www.apache.org/licenses/LICENSE-2.0\n        #\n        # Unless required by applicable law or agreed to in writing, software\n        # distributed under the License is distributed on an \"AS IS\" BASIS,\n        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n        # See the License for the specific language governing permissions and\n        # limitations under the License.\n        #testing\n\n        app.log.dir=${launcher.dir
 }/../logs\n        app.log.file=${launcher.name}.log\n\n        log4j.rootLogger=ERROR, drfa\n        log4j.logger.org.apache.directory.server.ldap.LdapServer=INFO\n        log4j.logger.org.apache.directory=WARN\n\n        log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n        log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n        log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n        log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n        log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n        log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n        log4j.appender.drfa.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n"
+    },
+
+    "gateway-log4j": {
+      "content": "\n\n      # Licensed to the Apache Software Foundation (ASF) under one\n      # or more contributor license agreements. See the NOTICE file\n      # distributed with this work for additional information\n      # regarding copyright ownership. The ASF licenses this file\n      # to you under the Apache License, Version 2.0 (the\n      # \"License\"); you may not use this file except in compliance\n      # with the License. You may obtain a copy of the License at\n      #\n      # http://www.apache.org/licenses/LICENSE-2.0\n      #\n      # Unless required by applicable law or agreed to in writing, software\n      # distributed under the License is distributed on an \"AS IS\" BASIS,\n      # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n      # See the License for the specific language governing permissions and\n      # limitations under the License.\n\n      app.log.dir=${launcher.dir}/../logs\n      app.log.file=${launcher.name}.log\n   
    app.audit.file=${launcher.name}-audit.log\n\n      log4j.rootLogger=ERROR, drfa\n\n      log4j.logger.org.apache.hadoop.gateway=INFO\n      #log4j.logger.org.apache.hadoop.gateway=DEBUG\n\n      #log4j.logger.org.eclipse.jetty=DEBUG\n      #log4j.logger.org.apache.shiro=DEBUG\n      #log4j.logger.org.apache.http=DEBUG\n      #log4j.logger.org.apache.http.client=DEBUG\n      #log4j.logger.org.apache.http.headers=DEBUG\n      #log4j.logger.org.apache.http.wire=DEBUG\n\n      log4j.appender.stdout=org.apache.log4j.ConsoleAppender\n      log4j.appender.stdout.layout=org.apache.log4j.PatternLayout\n      log4j.appender.stdout.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n\n\n      log4j.appender.drfa=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.drfa.File=${app.log.dir}/${app.log.file}\n      log4j.appender.drfa.DatePattern=.yyyy-MM-dd\n      log4j.appender.drfa.layout=org.apache.log4j.PatternLayout\n      log4j.appender.drfa.layout.ConversionPattern=%
 d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n\n\n      log4j.logger.audit=INFO, auditfile\n      log4j.appender.auditfile=org.apache.log4j.DailyRollingFileAppender\n      log4j.appender.auditfile.File=${app.log.dir}/${app.audit.file}\n      log4j.appender.auditfile.Append = true\n      log4j.appender.auditfile.DatePattern = '.'yyyy-MM-dd\n      log4j.appender.auditfile.layout = org.apache.hadoop.gateway.audit.log4j.layout.AuditLayout"
+    },
+    "knox-env": {
+      "knox_master_secret": "sa",
+      "knox_group": "knox",
+      "knox_pid_dir": "/var/run/knox",
+      "knox_user": "knox"
+    },
+    "kafka-env": {
+      "content": "\n#!/bin/bash\n\n# Set KAFKA specific environment variables here.\n\n# The java implementation to use.\nexport JAVA_HOME={{java64_home}}\nexport PATH=$PATH:$JAVA_HOME/bin",
+      "kafka_user": "kafka",
+      "kafka_log_dir": "/var/log/kafka",
+      "kafka_pid_dir": "/var/run/kafka"
+    },
+    "kafka-log4j": {
+      "content": "\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements.  See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership.  The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License.  You may obtain a copy of the License at\n#\n#   http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied.  See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\nkafka.logs.dir=logs\n\nlog4j.rootLogger=INFO, stdout\n\nlog4j.appender.stdout=org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout=org.apache.log4j
 .PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log\nlog4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.stateChangeAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.stateChangeAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log\nlog4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.requestAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.requestAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-reques
 t.log\nlog4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.cleanerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.cleanerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log\nlog4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\nlog4j.appender.controllerAppender=org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.controllerAppender.DatePattern='.'yyyy-MM-dd-HH\nlog4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log\nlog4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout\nlog4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n\n\n# Turn on all our debugging info\n#log4j.logger.kafka.producer.async.DefaultEventHandler=DEBUG, kafkaAppender\n#log4j.logge
 r.kafka.client.ClientUtils=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf=DEBUG, kafkaAppender\n#log4j.logger.kafka.perf.ProducerPerformance$ProducerThread=DEBUG, kafkaAppender\n#log4j.logger.org.I0Itec.zkclient.ZkClient=DEBUG\nlog4j.logger.kafka=INFO, kafkaAppender\nlog4j.logger.kafka.network.RequestChannel$=WARN, requestAppender\nlog4j.additivity.kafka.network.RequestChannel$=false\n\n#log4j.logger.kafka.network.Processor=TRACE, requestAppender\n#log4j.logger.kafka.server.KafkaApis=TRACE, requestAppender\n#log4j.additivity.kafka.server.KafkaApis=false\nlog4j.logger.kafka.request.logger=WARN, requestAppender\nlog4j.additivity.kafka.request.logger=false\n\nlog4j.logger.kafka.controller=TRACE, controllerAppender\nlog4j.additivity.kafka.controller=false\n\nlog4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender\nlog4j.additivity.kafka.log.LogCleaner=false\n\nlog4j.logger.state.change.logger=TRACE, stateChangeAppender\nlog4j.additivity.state.change.logger=false"
+    },
+    "kafka-broker": {
+      "log.segment.bytes": "1073741824",
+      "socket.send.buffer.bytes": "102400",
+      "num.network.threads": "3",
+      "log.flush.scheduler.interval.ms": "3000",
+      "kafka.ganglia.metrics.host": "localhost",
+      "zookeeper.session.timeout.ms": "6000",
+      "replica.lag.time.max.ms": "10000",
+      "num.io.threads": "8",
+      "kafka.ganglia.metrics.group": "kafka",
+      "replica.lag.max.messages": "4000",
+      "port": "6667",
+      "log.retention.bytes": "-1",
+      "fetch.purgatory.purge.interval.requests": "10000",
+      "producer.purgatory.purge.interval.requests": "10000",
+      "default.replication.factor": "1",
+      "replica.high.watermark.checkpoint.interval.ms": "5000",
+      "zookeeper.connect": "c6402.ambari.apache.org:2181",
+      "controlled.shutdown.retry.backoff.ms": "5000",
+      "num.partitions": "1",
+      "log.flush.interval.messages": "10000",
+      "replica.fetch.min.bytes": "1",
+      "queued.max.requests": "500",
+      "controlled.shutdown.max.retries": "3",
+      "replica.fetch.wait.max.ms": "500",
+      "controlled.shutdown.enable": "false",
+      "log.roll.hours": "168",
+      "log.cleanup.interval.mins": "10",
+      "replica.socket.receive.buffer.bytes": "65536",
+      "zookeeper.connection.timeout.ms": "6000",
+      "replica.fetch.max.bytes": "1048576",
+      "num.replica.fetchers": "1",
+      "socket.request.max.bytes": "104857600",
+      "message.max.bytes": "1000000",
+      "zookeeper.sync.time.ms": "2000",
+      "socket.receive.buffer.bytes": "102400",
+      "controller.message.queue.size": "10",
+      "log.flush.interval.ms": "3000",
+      "log.dirs": "/tmp/log/dir",
+      "controller.socket.timeout.ms": "30000",
+      "replica.socket.timeout.ms": "30000",
+      "auto.create.topics.enable": "true",
+      "log.index.size.max.bytes": "10485760",
+      "kafka.ganglia.metrics.port": "8649",
+      "log.index.interval.bytes": "4096",
+      "log.retention.hours": "168"
+    },
+    "spark-defaults": {
+      "spark.yarn.applicationMaster.waitTries": "10",
+      "spark.history.kerberos.keytab": "none",
+      "spark.yarn.preserve.staging.files": "false",
+      "spark.yarn.submit.file.replication": "3",
+      "spark.history.kerberos.principal": "none",
+      "spark.yarn.driver.memoryOverhead": "384",
+      "spark.yarn.queue": "default",
+      "spark.yarn.containerLauncherMaxThreads": "25",
+      "spark.yarn.scheduler.heartbeat.interval-ms": "5000",
+      "spark.history.ui.port": "18080",
+      "spark.yarn.max.executor.failures": "3",
+      "spark.driver.extraJavaOptions": "",
+      "spark.history.provider": "org.apache.spark.deploy.yarn.history.YarnHistoryProvider",
+      "spark.yarn.am.extraJavaOptions": "",
+      "spark.yarn.executor.memoryOverhead": "384"
+    },
+    "spark-javaopts-properties": {
+      "content": " "
+    },
+    "spark-log4j-properties": {
+      "content": "\n# Set everything to be logged to the console\nlog4j.rootCategory=INFO, console\nlog4j.appender.console=org.apache.log4j.ConsoleAppender\nlog4j.appender.console.target=System.err\nlog4j.appender.console.layout=org.apache.log4j.PatternLayout\nlog4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n# Settings to quiet third party logs that are too verbose\nlog4j.logger.org.eclipse.jetty=WARN\nlog4j.logger.org.eclipse.jetty.util.component.AbstractLifeCycle=ERROR\nlog4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO\nlog4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO"
+    },
+    "spark-env": {
+      "content": "\n#!/usr/bin/env bash\n\n# This file is sourced when running various Spark programs.\n# Copy it as spark-env.sh and edit that to configure Spark for your site.\n\n# Options read in YARN client mode\n#SPARK_EXECUTOR_INSTANCES=\"2\" #Number of workers to start (Default: 2)\n#SPARK_EXECUTOR_CORES=\"1\" #Number of cores for the workers (Default: 1).\n#SPARK_EXECUTOR_MEMORY=\"1G\" #Memory per Worker (e.g. 1000M, 2G) (Default: 1G)\n#SPARK_DRIVER_MEMORY=\"512 Mb\" #Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)\n#SPARK_YARN_APP_NAME=\"spark\" #The name of your application (Default: Spark)\n#SPARK_YARN_QUEUE=\"~@~Xdefault~@~Y\" #The hadoop queue to use for allocation requests (Default: @~Xdefault~@~Y)\n#SPARK_YARN_DIST_FILES=\"\" #Comma separated list of files to be distributed with the job.\n#SPARK_YARN_DIST_ARCHIVES=\"\" #Comma separated list of archives to be distributed with the job.\n\n# Generic options for the daemons used in the standalone deploy mode\n\n# Alt
 ernate conf dir. (Default: ${SPARK_HOME}/conf)\nexport SPARK_CONF_DIR=${SPARK_HOME:-{{spark_home}}}/conf\n\n# Where log files are stored.(Default:${SPARK_HOME}/logs)\n#export SPARK_LOG_DIR=${SPARK_HOME:-{{spark_home}}}/logs\nexport SPARK_LOG_DIR={{spark_log_dir}}\n\n# Where the pid file is stored. (Default: /tmp)\nexport SPARK_PID_DIR={{spark_pid_dir}}\n\n# A string representing this instance of spark.(Default: $USER)\nSPARK_IDENT_STRING=$USER\n\n# The scheduling priority for daemons. (Default: 0)\nSPARK_NICENESS=0\n\nexport HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n\n# The java implementation to use.\nexport JAVA_HOME={{java_home}}\n\nif [ -d \"/etc/tez/conf/\" ]; then\n  export TEZ_CONF_DIR=/etc/tez/conf\nelse\n  export TEZ_CONF_DIR=\nfi",
+      "spark_pid_dir": "/var/run/spark",
+      "spark_log_dir": "/var/log/spark",
+      "spark_group": "spark",
+      "spark_user": "spark"
+    },
+    "spark-metrics-properties": {
+      "content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then
  loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\
 n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host N
 ONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n##
  Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSo
 urce\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
+    },
+    "spark-metrics-properties": {
+      "content": "\n# syntax: [instance].sink|source.[name].[options]=[value]\n\n# This file configures Spark's internal metrics system. The metrics system is\n# divided into instances which correspond to internal components.\n# Each instance can be configured to report its metrics to one or more sinks.\n# Accepted values for [instance] are \"master\", \"worker\", \"executor\", \"driver\",\n# and \"applications\". A wild card \"*\" can be used as an instance name, in\n# which case all instances will inherit the supplied property.\n#\n# Within an instance, a \"source\" specifies a particular set of grouped metrics.\n# there are two kinds of sources:\n# 1. Spark internal sources, like MasterSource, WorkerSource, etc, which will\n# collect a Spark component's internal state. Each instance is paired with a\n# Spark source that is added automatically.\n# 2. Common sources, like JvmSource, which will collect low level state.\n# These can be added through configuration options and are then
  loaded\n# using reflection.\n#\n# A \"sink\" specifies where metrics are delivered to. Each instance can be\n# assigned one or more sinks.\n#\n# The sink|source field specifies whether the property relates to a sink or\n# source.\n#\n# The [name] field specifies the name of source or sink.\n#\n# The [options] field is the specific property of this source or sink. The\n# source or sink is responsible for parsing this property.\n#\n# Notes:\n# 1. To add a new sink, set the \"class\" option to a fully qualified class\n# name (see examples below).\n# 2. Some sinks involve a polling period. The minimum allowed polling period\n# is 1 second.\n# 3. Wild card properties can be overridden by more specific properties.\n# For example, master.sink.console.period takes precedence over\n# *.sink.console.period.\n# 4. A metrics specific configuration\n# \"spark.metrics.conf=${SPARK_HOME}/conf/metrics.properties\" should be\n# added to Java properties using -Dspark.metrics.conf=xxx if you want to\
 n# customize metrics system. You can also put the file in ${SPARK_HOME}/conf\n# and it will be loaded automatically.\n# 5. MetricsServlet is added by default as a sink in master, worker and client\n# driver, you can send http request \"/metrics/json\" to get a snapshot of all the\n# registered metrics in json format. For master, requests \"/metrics/master/json\" and\n# \"/metrics/applications/json\" can be sent seperately to get metrics snapshot of\n# instance master and applications. MetricsServlet may not be configured by self.\n#\n\n## List of available sinks and their properties.\n\n# org.apache.spark.metrics.sink.ConsoleSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n\n# org.apache.spark.metrics.sink.CSVSink\n# Name: Default: Description:\n# period 10 Poll period\n# unit seconds Units of poll period\n# directory /tmp Where to store CSV files\n\n# org.apache.spark.metrics.sink.GangliaSink\n# Name: Default: Description:\n# host N
 ONE Hostname or multicast group of Ganglia server\n# port NONE Port of Ganglia server(s)\n# period 10 Poll period\n# unit seconds Units of poll period\n# ttl 1 TTL of messages sent by Ganglia\n# mode multicast Ganglia network mode ('unicast' or 'multicast')\n\n# org.apache.spark.metrics.sink.JmxSink\n\n# org.apache.spark.metrics.sink.MetricsServlet\n# Name: Default: Description:\n# path VARIES* Path prefix from the web server root\n# sample false Whether to show entire set of samples for histograms ('false' or 'true')\n#\n# * Default path is /metrics/json for all instances except the master. The master has two paths:\n# /metrics/aplications/json # App information\n# /metrics/master/json # Master information\n\n# org.apache.spark.metrics.sink.GraphiteSink\n# Name: Default: Description:\n# host NONE Hostname of Graphite server\n# port NONE Port of Graphite server\n# period 10 Poll period\n# unit seconds Units of poll period\n# prefix EMPTY STRING Prefix to prepend to metric name\n\n##
  Examples\n# Enable JmxSink for all instances by class name\n#*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink\n\n# Enable ConsoleSink for all instances by class name\n#*.sink.console.class=org.apache.spark.metrics.sink.ConsoleSink\n\n# Polling period for ConsoleSink\n#*.sink.console.period=10\n\n#*.sink.console.unit=seconds\n\n# Master instance overlap polling period\n#master.sink.console.period=15\n\n#master.sink.console.unit=seconds\n\n# Enable CsvSink for all instances\n#*.sink.csv.class=org.apache.spark.metrics.sink.CsvSink\n\n# Polling period for CsvSink\n#*.sink.csv.period=1\n\n#*.sink.csv.unit=minutes\n\n# Polling directory for CsvSink\n#*.sink.csv.directory=/tmp/\n\n# Worker instance overlap polling period\n#worker.sink.csv.period=10\n\n#worker.sink.csv.unit=minutes\n\n# Enable jvm source for instance master, worker, driver and executor\n#master.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#worker.source.jvm.class=org.apache.spark.metrics.source.JvmSo
 urce\n\n#driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource\n\n#executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource"
+    },
+    "livy-log4j-properties": {
+      "content": "\n            # Set everything to be logged to the console\n            log4j.rootCategory=INFO, console\n            log4j.appender.console=org.apache.log4j.ConsoleAppender\n            log4j.appender.console.target=System.err\n            log4j.appender.console.layout=org.apache.log4j.PatternLayout\n            log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n            log4j.logger.org.eclipse.jetty=WARN"
+    },
+    "livy-conf": {
+      "livy.server.port": "8998",
+      "livy.server.csrf_protection.enabled": "true",
+      "livy.environment": "production",
+      "livy.impersonation.enabled": "true",
+      "livy.server.session.timeout": "3600000"
+    },
+    "livy-spark-blacklist": {
+      "content": "\n            #\n            # Configuration override / blacklist. Defines a list of properties that users are not allowed\n            # to override when starting Spark sessions.\n            #\n            # This file takes a list of property names (one per line). Empty lines and lines starting with \"#\"\n            # are ignored.\n            #"
+    },
+    "livy-env": {
+      "livy_group": "livy",
+      "spark_home": "/usr/hdp/current/spark-client",
+      "content": "\n            #!/usr/bin/env bash\n\n            # - SPARK_HOME      Spark which you would like to use in livy\n            # - HADOOP_CONF_DIR Directory containing the Hadoop / YARN configuration to use.\n            # - LIVY_LOG_DIR    Where log files are stored.  (Default: ${LIVY_HOME}/logs)\n            # - LIVY_PID_DIR    Where the pid file is stored. (Default: /tmp)\n            # - LIVY_SERVER_JAVA_OPTS  Java Opts for running livy server (You can set jvm related setting here, like jvm memory/gc algorithm and etc.)\n        export SPARK_HOME=/usr/hdp/current/spark-client\n        export HADOOP_CONF_DIR=/etc/hadoop/conf\n        export LIVY_LOG_DIR={{livy_log_dir}}\n        export LIVY_PID_DIR={{livy_pid_dir}}\n        export LIVY_SERVER_JAVA_OPTS=\"-Xmx2g\"",
+      "livy_pid_dir": "/var/run/livy",
+      "livy_log_dir": "/var/log/livy",
+      "livy_user": "livy"
+    },
+    "infra-solr-env": {
+      "infra_solr_znode": "/infra-solr",
+      "infra_solr_user": "solr",
+      "infra_solr_client_log_dir" :"/var/log/ambari-infra-solr-client"
+    },
+    "infra-solr-client-log4j" : {
+      "infra_solr_client_log_dir" : "/var/log/ambari-infra-solr-client",
+      "content" : "content"
+    },
+    "application-properties": {
+      "atlas.cluster.name" : "c2",
+      "atlas.rest.address": "http://c6401.ambari.apache.org:21000",
+      "atlas.graph.storage.backend": "berkeleyje",
+      "atlas.graph.storage.directory": "data/berkley",
+      "atlas.graph.index.search.backend": "solr5",
+      "atlas.graph.index.search.directory": "data/es",
+      "atlas.graph.index.search.elasticsearch.client-only": false,
+      "atlas.graph.index.search.elasticsearch.local-mode": true,
+      "atlas.lineage.hive.table.type.name": "Table",
+      "atlas.lineage.hive.column.type.name": "Column",
+      "atlas.lineage.hive.table.column.name": "columns",
+      "atlas.lineage.hive.process.type.name": "LoadProcess",
+      "atlas.lineage.hive.process.inputs.name": "inputTables",
+      "atlas.lineage.hive.process.outputs.name": "outputTables",
+      "atlas.enableTLS": false,
+      "atlas.authentication.method": "simple",
+      "atlas.authentication.principal": "atlas",
+      "atlas.authentication.keytab": "/etc/security/keytabs/atlas.service.keytab",
+      "atlas.http.authentication.enabled": false,
+      "atlas.http.authentication.type": "simple",
+      "atlas.http.authentication.kerberos.principal": "HTTP/_HOST@EXAMPLE.COM",
+      "atlas.http.authentication.kerberos.keytab": "/etc/security/keytabs/spnego.service.keytab",
+      "atlas.http.authentication.kerberos.name.rules": "DEFAULT",
+      "atlas.server.http.port" : "21000",
+      "atlas.notification.embedded" : false,
+      "atlas.kafka.bootstrap.servers" : "c6401.ambari.apache.org:6667",
+      "atlas.kafka.data" : "/usr/hdp/current/atlas-server/data/kafka",
+      "atlas.kafka.entities.group.id" : "entities",
+      "atlas.kafka.hook.group.id" : "atlas",
+      "atlas.kafka.zookeeper.connect" : "c6401.ambari.apache.org:2181"
+    },
+    "atlas-env": {
+      "content": "# The java implementation to use. If JAVA_HOME is not found we expect java and jar to be in path\nexport JAVA_HOME={{java64_home}}\n# any additional java opts you want to set. This will apply to both client and server operations\nexport METADATA_OPTS={{metadata_opts}}\n# metadata configuration directory\nexport METADATA_CONF={{conf_dir}}\n# Where log files are stored. Defatult is logs directory under the base install location\nexport METADATA_LOG_DIR={{log_dir}}\n# additional classpath entries\nexport METADATACPPATH={{metadata_classpath}}\n# data dir\nexport METADATA_DATA_DIR={{data_dir}}\n# Where do you want to expand the war file. By Default it is in /server/webapp dir under the base install dir.\nexport METADATA_EXPANDED_WEBAPP_DIR={{expanded_war_dir}}",
+      "metadata_user": "atlas",
+      "metadata_port": 21000,
+      "metadata_pid_dir": "/var/run/atlas",
+      "metadata_log_dir": "/var/log/atlas",
+      "metadata_data_dir": "/var/lib/atlas/data",
+      "metadata_expanded_war_dir": "/var/lib/atlas/server/webapp"
+    },
+    "atlas-log4j": {
+      "content": "<property><name>content</name><description>Custom log4j.properties</description><value></value></property>",
+      "atlas_log_level": "debug",
+      "audit_log_level": "OFF"
+    },
+    "atlas-solrconfig": {
+      "content": "<property><name>content</name><description>Custom solrconfig properties</description><value></value></property>"
+    },
+    "zeppelin-env": {
+      "zeppelin.server.kerberos.keytab": "", 
+      "shiro_ini_content": "\n[users]\n# List of users with their password allowed to access Zeppelin.\n# To use a different strategy (LDAP / Database / ...) check the shiro doc at http://shiro.apache.org/configuration.html#Configuration-INISections\n#admin = password1\n#user1 = password2, role1, role2\n#user2 = password3, role3\n#user3 = password4, role2\n\n# Sample LDAP configuration, for user Authentication, currently tested for single Realm\n[main]\n#ldapRealm = org.apache.shiro.realm.ldap.JndiLdapRealm\n#ldapRealm.userDnTemplate = uid={0},cn=users,cn=accounts,dc=hortonworks,dc=com\n#ldapRealm.contextFactory.url = ldap://ldaphost:389\n#ldapRealm.contextFactory.authenticationMechanism = SIMPLE\n#sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager\n#securityManager.sessionManager = $sessionManager\n# 86,400,000 milliseconds = 24 hour\n#securityManager.sessionManager.globalSessionTimeout = 86400000\nshiro.loginUrl = /api/login\n\n[urls]\n# anon means the acce
 ss is anonymous.\n# authcBasic means Basic Auth Security\n# To enfore security, comment the line below and uncomment the next one\n/api/version = anon\n/** = anon\n#/** = authc", 
+      "zeppelin.spark.jar.dir": "/apps/zeppelin", 
+      "zeppelin.executor.mem": "512m", 
+      "zeppelin_pid_dir": "/var/run/zeppelin", 
+      "zeppelin.executor.instances": "2", 
+      "log4j_properties_content": "\nlog4j.rootLogger = INFO, dailyfile\nlog4j.appender.stdout = org.apache.log4j.ConsoleAppender\nlog4j.appender.stdout.layout = org.apache.log4j.PatternLayout\nlog4j.appender.stdout.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n\nlog4j.appender.dailyfile.DatePattern=.yyyy-MM-dd\nlog4j.appender.dailyfile.Threshold = INFO\nlog4j.appender.dailyfile = org.apache.log4j.DailyRollingFileAppender\nlog4j.appender.dailyfile.File = ${zeppelin.log.file}\nlog4j.appender.dailyfile.layout = org.apache.log4j.PatternLayout\nlog4j.appender.dailyfile.layout.ConversionPattern=%5p [%d] ({%t} %F[%M]:%L) - %m%n", 
+      "zeppelin.server.kerberos.principal": "", 
+      "zeppelin_user": "zeppelin", 
+      "zeppelin_env_content": "\n# Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode\nexport MASTER=yarn-client\nexport SPARK_YARN_JAR={{spark_jar}}\n\n\n# Where log files are stored.  PWD by default.\nexport ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}\n\n# The pid files are stored. /tmp by default.\nexport ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}\n\n\nexport JAVA_HOME={{java64_home}}\n\n# Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS=\"-Dspark.executor.memory=8g -Dspark.cores.max=16\"\nexport ZEPPELIN_JAVA_OPTS=\"-Dhdp.version={{full_stack_version}} -Dspark.executor.memory={{executor_mem}} -Dspark.executor.instances={{executor_instances}} -Dspark.yarn.queue={{spark_queue}}\"\n\n\n# Zeppelin jvm mem options Default -Xmx1024m -XX:MaxPermSize=512m\n# export ZEPPELIN_MEM\n\n# zeppelin interpreter process jvm mem options. Defualt = ZEPPELIN_MEM\n# export ZEPPELIN_INTP_MEM\n\n# zeppelin interpreter process jvm options. Default = ZEPPELIN_JA
 VA_OPTS\n# export ZEPPELIN_INTP_JAVA_OPTS\n\n# Where notebook saved\n# export ZEPPELIN_NOTEBOOK_DIR\n\n# Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z\n# export ZEPPELIN_NOTEBOOK_HOMESCREEN\n\n# hide homescreen notebook from list when this value set to \"true\". default \"false\"\n# export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE\n\n# Bucket where notebook saved\n# export ZEPPELIN_NOTEBOOK_S3_BUCKET\n\n# User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json\n# export ZEPPELIN_NOTEBOOK_S3_USER\n\n# A string representing this instance of zeppelin. $USER by default\n# export ZEPPELIN_IDENT_STRING\n\n# The scheduling priority for daemons. Defaults to 0.\n# export ZEPPELIN_NICENESS\n\n\n#### Spark interpreter configuration ####\n\n## Use provided spark installation ##\n## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit\n##\n# (required) When it is defined, load it instead of Zeppelin embedded Spark libraries\n
 export SPARK_HOME={{spark_home}}\n\n# (optional) extra options to pass to spark submit. eg) \"--driver-memory 512M --executor-memory 1G\".\n# export SPARK_SUBMIT_OPTIONS\n\n## Use embedded spark binaries ##\n## without SPARK_HOME defined, Zeppelin still able to run spark interpreter process using embedded spark binaries.\n## however, it is not encouraged when you can define SPARK_HOME\n##\n# Options read in YARN client mode\n# yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.\nexport HADOOP_CONF_DIR=/etc/hadoop/conf\n\n# Pyspark (supported with Spark 1.2.1 and above)\n# To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI\n# path to the python command. must be the same path on the driver(Zeppelin) and all workers.\n# export PYSPARK_PYTHON\n\nexport PYTHONPATH=\"${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.8.2.1-src.zip\"\nexport SPARK_YARN_USER_ENV=\"PYTHONPATH=${PYTHONPAT
 H}\"\n\n## Spark interpreter options ##\n##\n# Use HiveContext instead of SQLContext if set true. true by default.\n# export ZEPPELIN_SPARK_USEHIVECONTEXT\n\n# Execute multiple SQL concurrently if set true. false by default.\n# export ZEPPELIN_SPARK_CONCURRENTSQL\n\n# Max number of SparkSQL result to display. 1000 by default.\n# export ZEPPELIN_SPARK_MAXRESULT", 
+      "zeppelin_log_dir": "/var/log/zeppelin", 
+      "zeppelin_group": "zeppelin"
+    },
+"zeppelin-config": {
+            "zeppelin.server.port": "9995", 
+            "zeppelin.ssl.truststore.password": "change me", 
+            "zeppelin.interpreters": "org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInterpreter,org.apache.zeppelin.livy.LivySparkRInterpreter,org.apache.zeppelin.livy.LivySparkSQLInterpreter", 
+            "zeppelin.ssl.truststore.path": "conf/truststore", 
+            "zeppelin.notebook.dir": "notebook", 
+            "zeppelin.ssl.keystore.password": "change me", 
+            "zeppelin.ssl.keystore.path": "conf/keystore", 
+            "zeppelin.server.addr": "0.0.0.0", 
+            "zeppelin.ssl.client.auth": "false", 
+            "zeppelin.notebook.homescreen": " ", 
+            "zeppelin.interpreter.dir": "interpreter", 
+            "zeppelin.ssl.keystore.type": "JKS", 
+            "zeppelin.notebook.s3.user": "user", 
+            "zeppelin.ssl.key.manager.password": "change me", 
+            "zeppelin.anonymous.allowed": "true", 
+            "zeppelin.ssl.truststore.type": "JKS", 
+            "zeppelin.ssl": "false", 
+            "zeppelin.notebook.storage": "org.apache.zeppelin.notebook.repo.VFSNotebookRepo", 
+            "zeppelin.websocket.max.text.message.size": "1024000", 
+            "zeppelin.interpreter.connect.timeout": "30000", 
+            "zeppelin.notebook.s3.bucket": "zeppelin", 
+            "zeppelin.notebook.homescreen.hide": "false", 
+            "zeppelin.server.allowed.origins": "*"
+        },
+    "zoo.cfg": {
+      "clientPort": "2181"
+    },
+    "ranger-hbase-plugin-properties": {
+      "ranger-hbase-plugin-enabled":"yes"
+    },
+    "ranger-hive-plugin-properties": {
+      "ranger-hive-plugin-enabled":"yes"
+    },
+    "ranger-env": {
+      "xml_configurations_supported" : "true"
+    },
+    "tagsync-application-properties": {
+            "atlas.kafka.hook.group.id": "atlas",
+            "atlas.kafka.zookeeper.connect": "os-mv-31-dev-4.novalocal:2181",
+            "atlas.kafka.acks": "1",
+            "atlas.kafka.entities.group.id": "ranger_entities_consumer",
+            "atlas.kafka.data": "/usr/hdp/current/atlas-server/data/kafka",
+            "atlas.kafka.bootstrap.servers": "localhost:2181",
+            "atlas.notification.embedded": "false"
+    },
+    "ranger-tagsync-site": {
+            "ranger.tagsync.sink.impl.class": "org.apache.ranger.tagsync.sink.tagadmin.TagAdminRESTSink",
+            "ranger.tagsync.atlasrestsource.endpoint": "",
+            "ranger.tagsync.tagadmin.rest.ssl.config.file": "/usr/hdp/current/ranger-tagsync/conf/mytruststore.jks",
+            "ranger.tagsync.logdir": "/var/log/ranger/tagsync",
+            "ranger.tagsync.filesource.filename": "/usr/hdp/current/ranger-tagsync/conf/etc/ranger/data/tags.json",
+            "ranger.tagsync.enabled": "true",
+            "ranger.tagsync.tagadmin.rest.url": "{{ranger_external_url}}",
+            "ranger.tagsync.atlasrestsource.download.interval": "",
+            "ranger.tagsync.filesource.modtime.check.interval": "60000",
+            "ranger.tagsync.tagadmin.password": "rangertagsync",
+            "ranger.tagsync.source.impl.class": "file",
+            "ranger.tagsync.source.atlas.custom.resource.mappers": "",
+            "ranger.tagsync.tagadmin.alias": "tagsync.tagadmin",
+            "ranger.tagsync.tagadmin.keystore": "/usr/hdp/current/ranger-tagsync/conf/rangertagsync.jceks",
+            "ranger.tagsync.atlas.to.service.mapping": ""
+    },
+    "druid-env": {
+      "druid_log_dir" : "/var/log/druid",
+      "druid_pid_dir" : "/var/run/druid",
+      "content" : "#!/bin/bash\n # Set DRUID specific environment variables here.\n# The java implementation to use\nexport JAVA_HOME={{java8_home}}\nexport PATH=$PATH:$JAVA_HOME/bin\nexport DRUID_PID_DIR={{druid_pid_dir}}\nexport DRUID_LOG_DIR={{druid_log_dir}}\nexport DRUID_CONF_DIR={{druid_conf_dir}}\nexport DRUID_LIB_DIR={{druid_home}}/lib",
+      "druid.coordinator.jvm.heap.memory" : 1024,
+      "druid.coordinator.jvm.direct.memory": 2048,
+      "druid.coordinator.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.broker.jvm.heap.memory" : 1024,
+      "druid.broker.jvm.direct.memory": 2048,
+      "druid.broker.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.middlemanager.jvm.heap.memory" : 1024,
+      "druid.middlemanager.jvm.direct.memory": 2048,
+      "druid.middlemanager.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.historical.jvm.heap.memory" : 1024,
+      "druid.historical.jvm.direct.memory": 2048,
+      "druid.historical.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.overlord.jvm.heap.memory" : 1024,
+      "druid.overlord.jvm.direct.memory": 2048,
+      "druid.overlord.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid.router.jvm.heap.memory" : 1024,
+      "druid.router.jvm.direct.memory": 2048,
+      "druid.router.jvm.opts": "-Duser.timezone=UTC -Dfile.encoding=UTF-8",
+      "druid_user": "druid"
+    },
+    "druid-common" : {
+      "druid.metadata.storage.type" : "mysql",
+      "druid.storage.type" : "hdfs",
+      "druid.storage.storageDirectory" : "/user/druid/data",
+      "druid.indexer.logs.type": "hdfs",
+      "druid.indexer.logs.directory": "/user/druid/logs",
+      "druid.extensions.pullList": "[\"mysql-metadata-storage\"]",
+      "druid.extensions.loadList": "[\"mysql-metadata-storage\", \"druid-datasketches\"]"
+    },
+    "druid-historical" : {
+      "druid.segmentCache.infoDir" : "/apps/druid/segmentCache"
+    },
+    "druid-coordinator" : {
+      "druid.service" : "druid/coordinator"
+    },
+    "druid-overlord" : {
+      "druid.service" : "druid/overlord"
+    },
+    "druid-broker" : {
+      "druid.service" : "druid/broker"
+    },
+    "druid-middlemanager" : {
+      "druid.service" : "druid/middlemanager",
+      "druid.indexer.task.hadoopWorkingPath" : "/tmp/druid-indexing"
+    },
+    "druid-router" : {
+      "druid.service" : "druid/router"
+    },
+    "druid-log4j" : {
+      "content" : "<![CDATA[<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n<!--\n  ~ Licensed to the Apache Software Foundation (ASF) under one\n  ~ or more contributor license agreements.  See the NOTICE file\n  ~ distributed with this work for additional information\n  ~ regarding copyright ownership.  The ASF licenses this file\n  ~ to you under the Apache License, Version 2.0 (the\n  ~ \"License\"); you may not use this file except in compliance\n  ~ with the License.  You may obtain a copy of the License at\n  ~\n  ~     http://www.apache.org/licenses/LICENSE-2.0\n  ~\n  ~ Unless required by applicable law or agreed to in writing, software\n  ~ distributed under the License is distributed on an \"AS IS\" BASIS,\n  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n  ~ See the License for the specific language governing permissions and\n  ~ limitations under the License.\n  -->\n    <Configuration>\n        <Appenders>\n            <Console name=\"Console\
 " target=\"SYSTEM_OUT\">\n                <PatternLayout pattern=\"%d{ISO8601} %p [%t] %c - %m%n\"/>\n            </Console>\n        </Appenders>\n        <Loggers>\n            <Logger name=\"com.metamx\" level=\"{{metamx_log_level}}\"/>\n            <Logger name=\"io.druid\" level=\"{{druid_log_level}}\"/>\n            <Root level=\"{{root_log_level}}\">\n                <AppenderRef ref=\"Console\"/>\n            </Root>\n        </Loggers>\n    </Configuration>\n      ]]>\n"
+    },
+    "druid-logrotate" : {
+      "content" : "<![CDATA[\n    {{druid_log_dir}}/*.log {\n        copytruncate\n        rotate 7\n        daily\n        nocompress\n        missingok\n        notifempty\n        create 660 druid users\n        dateext\n        dateformat -%Y-%m-%d-%s\n        }\n      ]]>\n"
+    }
+  },
+  "configuration_attributes": {
+    "sqoop-site": {},
+    "yarn-site": {
+      "final": {
+        "yarn.nodemanager.disk-health-checker.min-healthy-disks": "true",
+        "yarn.nodemanager.container-executor.class": "true",
+        "yarn.nodemanager.local-dirs": "true"
+      }
+    },
+    "yarn-site": {
+      "final": {
+        "is_supported_yarn_ranger": "true"
+      }
+    },
+    "hdfs-site": {
+      "final": {
+        "dfs.web.ugi": "true",
+        "dfs.support.append": "true",
+        "dfs.cluster.administrators": "true"
+      }
+    },
+    "core-site": {
+      "final": {
+        "hadoop.proxyuser.hive.groups": "true",
+        "webinterface.private.actions": "true",
+        "hadoop.proxyuser.oozie.hosts": "true"
+      }
+    },
+    "knox-env": {},
+    "gateway-site": {},
+    "users-ldif": {},
+    "kafka-env": {},
+    "kafka-log4j": {},
+    "kafka-broker": {},
+    "metadata-env": {},
+    "atlas-hbase-site": {},
+    "tagsync-application-properties": {},
+    "ranger-tagsync-site": {}
+  },
+  "configurationTags": {
+    "slider-client": {
+      "tag": "version1"
+    },
+    "slider-log4j": {
+      "tag": "version1"
+    },
+    "slider-env": {
+      "tag": "version1"
+    },
+    "core-site": {
+      "tag": "version1"
+    },
+    "hdfs-site": {
+      "tag": "version1"
+    },
+    "yarn-site": {
+      "tag": "version1"
+    },
+    "gateway-site": {
+      "tag": "version1"
+    },
+    "topology": {
+      "tag": "version1"
+    },
+    "users-ldif": {
+      "tag": "version1"
+    },
+    "kafka-env": {
+      "tag": "version1"
+    },
+    "kafka-log4j": {
+      "tag": "version1"
+    },
+    "kafka-broker": {
+      "tag": "version1"
+    },
+    "metadata-env": {
+      "tag": "version1"
+    },
+    "tagsync-application-properties": {
+      "tag": "version1"
+    },
+    "ranger-tagsync-site": {
+      "tag": "version1"
+    }
+  },
+  "commandId": "7-1",
+  "clusterHostInfo": {
+    "ambari_server_host": [
+      "c6401.ambari.apache.org"
+    ],
+    "all_ping_ports": [
+      "8670",
+      "8670"
+    ],
+    "rm_host": [
+      "c6402.ambari.apache.org"
+    ],
+    "all_hosts": [
+      "c6401.ambari.apache.org",
+      "c6402.ambari.apache.org"
+    ],
+    "knox_gateway_hosts": [
+      "jaimin-knox-1.c.pramod-thangali.internal"
+    ],
+    "kafka_broker_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "infra_solr_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "zookeeper_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "ranger_tagsync_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "atlas_server_hosts": [
+      "c6401.ambari.apache.org"
+    ],
+    "zeppelin_master_hosts": [
+      "c6401.ambari.apache.org"
+    ]
+  }
+}
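
(For context on the druid-env values above: the per-component heap, direct-memory and opts settings are what later get templated into each node's jvm.config, per the InlineTemplate assertions in test_druid.py further down. A rough, illustrative rendering of the coordinator's jvm.config with these defaults would look like the following; it is a sketch of the template output, not a file from the patch:)

    -server
    -Xms1024m
    -Xmx1024m
    -XX:MaxDirectMemorySize=2048m
    -Dlog4j.configurationFile=/usr/hdp/current/druid-coordinator/conf/_common/druid-log4j.xml
    -Dlog4j.debug
    -Duser.timezone=UTC -Dfile.encoding=UTF-8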


[2/3] ambari git commit: Integrate Druid with Ambari (Nishant Bangarwa, Slim Bouguerra via Swapan Shridhar).

Posted by sw...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
new file mode 100644
index 0000000..ee1d61c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/status_params.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+druid_pid_dir = config['configurations']['druid-env']['druid_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
new file mode 100644
index 0000000..c68b9b9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/quicklinks/quicklinks.json
@@ -0,0 +1,37 @@
+{
+  "name": "default",
+  "description": "default quick links configuration",
+  "configuration": {
+    "protocol": {
+      "type": "HTTP_ONLY"
+    },
+    "links": [
+      {
+        "name": "coordinator_console",
+        "label": "Druid Coordinator Console",
+        "component_name": "DRUID_COORDINATOR",
+        "requires_user_name": "false",
+        "url": "%@://%@:%@",
+        "port": {
+          "http_property": "druid.port",
+          "http_default_port": "8081",
+          "regex": "^(\\d+)$",
+          "site": "druid-coordinator"
+        }
+      },
+      {
+        "name": "overlord_console",
+        "label": "Druid Overlord Console",
+        "component_name": "DRUID_OVERLORD",
+        "requires_user_name": "false",
+        "url": "%@://%@:%@",
+        "port": {
+          "http_property": "druid.port",
+          "http_default_port": "8090",
+          "regex": "^(\\d+)$",
+          "site": "druid-overlord"
+        }
+      }
+    ]
+  }
+}
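
(A note on the "%@://%@:%@" url template above: the three placeholders stand for protocol, host and port, which the Ambari web UI fills in from the protocol section, the component host and the resolved druid.port. A minimal illustration of that substitution follows; build_quick_link is a hypothetical helper, not Ambari code:)

    def build_quick_link(protocol, host, port):
        # "%@://%@:%@" -> protocol, host, port, substituted in that order
        return "%s://%s:%s" % (protocol, host, port)

    # With the defaults above, the coordinator console would resolve to:
    # build_quick_link("http", "c6401.ambari.apache.org", "8081")
    #   -> "http://c6401.ambari.apache.org:8081"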

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
new file mode 100644
index 0000000..f494594
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/themes/theme.json
@@ -0,0 +1,120 @@
+{
+  "name": "default",
+  "description": "Default theme for Druid service",
+  "configuration": {
+    "layouts": [
+      {
+        "name": "default",
+        "tabs": [
+          {
+            "name": "metadata_storage",
+            "display-name": "META DATA STORAGE CONFIG",
+            "layout": {
+              "tab-columns": "1",
+              "tab-rows": "1",
+              "sections": [
+                {
+                  "name": "section-metadata-storage",
+                  "display-name": "",
+                  "row-index": "0",
+                  "column-index": "0",
+                  "row-span": "2",
+                  "column-span": "1",
+                  "section-columns": "1",
+                  "section-rows": "1",
+                  "subsections": [
+                    {
+                      "name": "subsection-metadata-storage-row1-col1",
+                      "display-name": "META DATA STORAGE",
+                      "row-index": "0",
+                      "column-index": "0",
+                      "row-span": "1",
+                      "column-span": "1"
+                    }
+                  ]
+                }
+              ]
+            }
+          }
+        ]
+      }
+    ],
+    "placement": {
+      "configuration-layout": "default",
+      "configs": [
+        {
+          "config": "druid-common/database_name",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.type",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.user",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.password",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/metastore_hostname",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.port",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        },
+        {
+          "config": "druid-common/druid.metadata.storage.connector.connectURI",
+          "subsection-name": "subsection-metadata-storage-row1-col1"
+        }
+      ]
+    },
+    "widgets": [
+      {
+        "config": "druid-common/database_name",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.type",
+        "widget": {
+          "type": "combo"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.user",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.password",
+        "widget": {
+          "type": "password"
+        }
+      },
+      {
+        "config": "druid-common/metastore_hostname",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.port",
+        "widget": {
+          "type": "text-field"
+        }
+      },
+      {
+        "config": "druid-common/druid.metadata.storage.connector.connectURI",
+        "widget": {
+          "type": "text-field"
+        }
+      }
+    ]
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/stacks/HDP/2.6/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.6/role_command_order.json
new file mode 100644
index 0000000..66b6ed2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/role_command_order.json
@@ -0,0 +1,19 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "DRUID_HISTORICAL-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_OVERLORD-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_MIDDLEMANAGER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_BROKER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_ROUTER-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_COORDINATOR-START" : ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "DRUID_OVERLORD-RESTART" : ["DRUID_HISTORICAL-RESTART"],
+    "DRUID_MIDDLEMANAGER-RESTART" : ["DRUID_OVERLORD-RESTART"],
+    "DRUID_BROKER-RESTART" : ["DRUID_MIDDLEMANAGER-RESTART"],
+    "DRUID_ROUTER-RESTART" : ["DRUID_BROKER-RESTART"],
+    "DRUID_COORDINATOR-RESTART" : ["DRUID_ROUTER-RESTART"],
+    "DRUID_SERVICE_CHECK-SERVICE_CHECK" : ["DRUID_HISTORICAL-START", "DRUID_COORDINATOR-START", "DRUID_OVERLORD-START", "DRUID_MIDDLEMANAGER-START", "DRUID_BROKER-START", "DRUID_ROUTER-START"]
+  }
+}
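
(As the _comment records note, each entry maps a blocked role-command to the role-commands that must finish first: every DRUID_*-START waits on the ZooKeeper, HDFS and YARN daemons, and the service check waits on all Druid components. A tiny standalone reader of that format is sketched below; blockers_for is a hypothetical helper, not part of Ambari's scheduler:)

    import json

    def blockers_for(path, role_command):
        # Return the blocker role-commands that must complete before
        # the given blocked role-command may run.
        with open(path) as f:
            deps = json.load(f)["general_deps"]
        return deps.get(role_command, [])

    # blockers_for("role_command_order.json", "DRUID_BROKER-START")
    #   -> ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START",
    #       "RESOURCEMANAGER-START", "NODEMANAGER-START"]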

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
new file mode 100644
index 0000000..1661285
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/kerberos.json
@@ -0,0 +1,78 @@
+{
+  "services": [
+    {
+      "name": "DRUID",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "druid",
+          "principal": {
+            "value": "${druid-env/druid_user}@${realm}",
+            "type": "user",
+            "configuration": "druid-common/druid.hadoop.security.kerberos.principal",
+            "local_username": "${druid-env/druid_user}"
+          },
+          "keytab": {
+            "file": "${keytab_dir}/druid.headless.keytab",
+            "owner": {
+              "name": "${druid-env/druid_user}",
+              "access": "r"
+            },
+            "group": {
+              "name": "${cluster-env/user_group}",
+              "access": "r"
+            },
+            "configuration": "druid-common/druid.hadoop.security.kerberos.keytab"
+          }
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "DRUID_HISTORICAL",
+          "identities": [
+            {
+              "name": "/druid"
+            }
+          ]
+        },
+        {
+          "name": "DRUID_BROKER",
+          "identities": [
+            {
+              "name": "/druid"
+            }
+          ]
+        },
+        {
+          "name": "DRUID_OVERLORD",
+          "identities": [
+            {
+              "name": "/druid"
+            }
+          ]
+        },
+        {
+          "name": "DRUID_COORDINATOR",
+          "identities": [
+            {
+              "name": "/druid"
+            }
+          ]
+        },
+        {
+          "name": "DRUID_MIDDLEMANAGER",
+          "identities": [
+            {
+              "name": "/druid"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
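
(The principal and keytab values above use ${config-type/property} placeholders, plus ${realm}, which Ambari resolves against the cluster configuration when Kerberos is enabled. A rough sketch of that substitution follows; resolve_placeholders is a hypothetical stand-in, not Ambari's implementation:)

    import re

    def resolve_placeholders(value, configurations, realm):
        # Replace ${realm} and ${config-type/property} references
        # with values from the supplied configuration dictionaries.
        def lookup(match):
            ref = match.group(1)
            if ref == "realm":
                return realm
            config_type, prop = ref.split("/", 1)
            return configurations[config_type][prop]
        return re.sub(r"\$\{([^}]+)\}", lookup, value)

    # resolve_placeholders("${druid-env/druid_user}@${realm}",
    #                      {"druid-env": {"druid_user": "druid"}}, "EXAMPLE.COM")
    #   -> "druid@EXAMPLE.COM"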

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
new file mode 100644
index 0000000..49d09b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/DRUID/metainfo.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>DRUID</name>
+      <version>0.9.2.2.6</version>
+      <extends>common-services/DRUID/0.9.2</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
index 1f722dc..49dd086 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/stack_advisor.py
@@ -16,6 +16,191 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 """
+from resource_management.core.logger import Logger
+import json
+from resource_management.libraries.functions import format
+
 
 class HDP26StackAdvisor(HDP25StackAdvisor):
-  pass
\ No newline at end of file
+    def __init__(self):
+        super(HDP26StackAdvisor, self).__init__()
+        Logger.initialize_logger()
+
+    def getServiceConfigurationRecommenderDict(self):
+        parentRecommendConfDict = super(HDP26StackAdvisor, self).getServiceConfigurationRecommenderDict()
+        childRecommendConfDict = {
+            "DRUID": self.recommendDruidConfigurations
+        }
+        parentRecommendConfDict.update(childRecommendConfDict)
+        return parentRecommendConfDict
+
+    def recommendDruidConfigurations(self, configurations, clusterData, services, hosts):
+
+        componentsListList = [service["components"] for service in services["services"]]
+        componentsList = [item["StackServiceComponents"] for sublist in componentsListList for item in sublist]
+        servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+        putCommonProperty = self.putProperty(configurations, "druid-common", services)
+
+        putCommonProperty('druid.zk.service.host', self.getZKHostPortString(services))
+        self.recommendDruidMaxMemoryLimitConfigurations(configurations, clusterData, services, hosts)
+
+        # recommending the metadata storage uri
+        database_name = services['configurations']["druid-common"]["properties"]["database_name"]
+        metastore_hostname = services['configurations']["druid-common"]["properties"]["metastore_hostname"]
+        database_type = services['configurations']["druid-common"]["properties"]["druid.metadata.storage.type"]
+        metadata_storage_port = "1527"
+        mysql_extension_name = "io.druid.extensions:mysql-metadata-storage"
+        mysql_module_name = "mysql-metadata-storage"
+        postgres_module_name = "postgresql-metadata-storage"
+        extensions_load_list = services['configurations']['druid-common']['properties']['druid.extensions.loadList']
+        extensions_pull_list = services['configurations']['druid-common']['properties']['druid.extensions.pullList']
+        putDruidCommonProperty = self.putProperty(configurations, "druid-common", services)
+
+        extensions_pull_list = self.removeFromList(extensions_pull_list, mysql_extension_name)
+        extensions_load_list = self.removeFromList(extensions_load_list, mysql_module_name)
+        extensions_load_list = self.removeFromList(extensions_load_list, postgres_module_name)
+
+        if database_type == 'mysql':
+            metadata_storage_port = "3306"
+            extensions_pull_list = self.addToList(extensions_pull_list, mysql_extension_name)
+            extensions_load_list = self.addToList(extensions_load_list, mysql_module_name)
+
+        if database_type == 'postgres':
+            extensions_load_list = self.addToList(extensions_load_list, postgres_module_name)
+            metadata_storage_port = "5432"
+
+        putDruidCommonProperty('druid.metadata.storage.connector.port', metadata_storage_port)
+        putDruidCommonProperty('druid.metadata.storage.connector.connectURI',
+                               self.getMetadataConnectionString(database_type).format(metastore_hostname, database_name,
+                                                                                      metadata_storage_port))
+        # HDFS is installed
+        if "HDFS" in servicesList and "hdfs-site" in services["configurations"]:
+            # recommend HDFS as default deep storage
+            extensions_load_list = self.addToList(extensions_load_list, "druid-hdfs-storage")
+            putCommonProperty("druid.storage.type", "hdfs")
+            putCommonProperty("druid.storage.storageDirectory", "/user/druid/data")
+            # configure indexer logs configs
+            putCommonProperty("druid.indexer.logs.type", "hdfs")
+            putCommonProperty("druid.indexer.logs.directory", "/user/druid/logs")
+
+        if "KAFKA" in servicesList:
+            extensions_load_list = self.addToList(extensions_load_list, "druid-kafka-indexing-service")
+
+        putCommonProperty('druid.extensions.loadList', extensions_load_list)
+        putCommonProperty('druid.extensions.pullList', extensions_pull_list)
+
+        # JVM Configs go to env properties
+        putEnvProperty = self.putProperty(configurations, "druid-env", services)
+
+        # processing thread pool Config
+        for component in ['DRUID_HISTORICAL', 'DRUID_BROKER']:
+            component_hosts = self.getHostsWithComponent("DRUID", component, services, hosts)
+            nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
+            putComponentProperty = self.putProperty(configurations, format("druid-{nodeType}"), services)
+            if (component_hosts is not None and len(component_hosts) > 0):
+                totalAvailableCpu = self.getMinCpu(component_hosts)
+                processingThreads = 1
+                if totalAvailableCpu > 1:
+                    processingThreads = totalAvailableCpu - 1
+                putComponentProperty('druid.processing.numThreads', processingThreads)
+                putComponentProperty('druid.server.http.numThreads', max(10, (totalAvailableCpu * 17) / 16 + 2) + 30)
+
+    def getMetadataConnectionString(self, database_type):
+        driverDict = {
+            'mysql': 'jdbc:mysql://{0}:{2}/{1}?createDatabaseIfNotExist=true',
+            'derby': 'jdbc:derby://{0}:{2}/{1};create=true',
+            'postgres': 'jdbc:postgresql://{0}:{2}/{1}'
+        }
+        return driverDict.get(database_type.lower())
+
+    def addToList(self, json_list, word):
+        desr_list = json.loads(json_list)
+        if word not in desr_list:
+            desr_list.append(word)
+        return json.dumps(desr_list)
+
+    def removeFromList(self, json_list, word):
+        desr_list = json.loads(json_list)
+        if word in desr_list:
+            desr_list.remove(word)
+        return json.dumps(desr_list)
+
+    def recommendDruidMaxMemoryLimitConfigurations(self, configurations, clusterData, services, hosts):
+        putEnvPropertyAttribute = self.putPropertyAttribute(configurations, "druid-env")
+        for component in ["DRUID_HISTORICAL", "DRUID_MIDDLEMANAGER", "DRUID_BROKER", "DRUID_OVERLORD",
+                          "DRUID_COORDINATOR"]:
+            component_hosts = self.getHostsWithComponent("DRUID", component, services, hosts)
+            if component_hosts is not None and len(component_hosts) > 0:
+                totalAvailableMem = self.getMinMemory(component_hosts) / 1024  # In MB
+                nodeType = self.DRUID_COMPONENT_NODE_TYPE_MAP[component]
+                putEnvPropertyAttribute(format('druid.{nodeType}.jvm.heap.memory'), 'maximum',
+                                        max(totalAvailableMem, 1024))
+
+    DRUID_COMPONENT_NODE_TYPE_MAP = {
+        'DRUID_BROKER': 'broker',
+        'DRUID_COORDINATOR': 'coordinator',
+        'DRUID_HISTORICAL': 'historical',
+        'DRUID_MIDDLEMANAGER': 'middlemanager',
+        'DRUID_OVERLORD': 'overlord',
+        'DRUID_ROUTER': 'router'
+    }
+
+    def getMinMemory(self, component_hosts):
+        min_ram_kb = 1073741824  # 1 TB
+        for host in component_hosts:
+            ram_kb = host['Hosts']['total_mem']
+            min_ram_kb = min(min_ram_kb, ram_kb)
+        return min_ram_kb
+
+    def getMinCpu(self, component_hosts):
+        min_cpu = 256
+        for host in component_hosts:
+            cpu_count = host['Hosts']['cpu_count']
+            min_cpu = min(min_cpu, cpu_count)
+        return min_cpu
+
+    def getServiceConfigurationValidators(self):
+        parentValidators = super(HDP26StackAdvisor, self).getServiceConfigurationValidators()
+        childValidators = {
+            "DRUID": {"druid-env": self.validateDruidEnvConfigurations,
+                      "druid-historical": self.validateDruidHistoricalConfigurations,
+                      "druid-broker": self.validateDruidBrokerConfigurations}
+        }
+        self.mergeValidators(parentValidators, childValidators)
+        return parentValidators
+
+    def validateDruidEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+        validationItems = []
+        #  Minimum Direct memory Validation
+        envProperties = services['configurations']['druid-env']['properties']
+        for nodeType in ['broker', 'historical']:
+            properties = services['configurations'][format('druid-{nodeType}')]['properties']
+            intermediateBufferSize = int(properties['druid.processing.buffer.sizeBytes']) / (1024 * 1024)  # In MBs
+            processingThreads = int(properties['druid.processing.numThreads'])
+            directMemory = int(envProperties[format('druid.{nodeType}.jvm.direct.memory')])
+            if directMemory < (processingThreads + 1) * intermediateBufferSize:
+                validationItems.append(
+                    {"config-name": format("druid.{nodeType}.jvm.direct.memory"),
+                     "item": self.getErrorItem(
+                         format(
+                             "Not enough direct memory available for {nodeType} Node. "
+                             "Please adjust druid.{nodeType}.jvm.direct.memory, druid.processing.buffer.sizeBytes, druid.processing.numThreads"
+                         )
+                     )})
+        return self.toConfigurationValidationProblems(validationItems, "druid-env")
+
+    def validateDruidHistoricalConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+        validationItems = [
+            {"config-name": "druid.processing.numThreads",
+             "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults,
+                                                           "druid.processing.numThreads")}
+        ]
+        return self.toConfigurationValidationProblems(validationItems, "druid-historical")
+
+    def validateDruidBrokerConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+        validationItems = [
+            {"config-name": "druid.processing.numThreads",
+             "item": self.validatorEqualsToRecommendedItem(properties, recommendedDefaults,
+                                                           "druid.processing.numThreads")}
+        ]
+        return self.toConfigurationValidationProblems(validationItems, "druid-broker")
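
(Two details of the advisor above that are easy to miss: druid.extensions.loadList and druid.extensions.pullList are stored as JSON-encoded strings, hence the json.loads/json.dumps round-trip in addToList/removeFromList, and the druid-env validator requires direct memory of at least (numThreads + 1) * bufferSizeMB. A small standalone illustration of both follows, without using the advisor classes themselves; the buffer size in the worked check is an assumed example value:)

    import json

    def add_to_list(json_list, word):
        # Same idea as HDP26StackAdvisor.addToList: round-trip the
        # JSON-encoded string so the stored value stays valid JSON.
        items = json.loads(json_list)
        if word not in items:
            items.append(word)
        return json.dumps(items)

    load_list = '["mysql-metadata-storage", "druid-datasketches"]'
    load_list = add_to_list(load_list, "druid-hdfs-storage")
    # -> '["mysql-metadata-storage", "druid-datasketches", "druid-hdfs-storage"]'

    # Metadata connect URI recommendation for mysql, using the template above:
    # 'jdbc:mysql://{0}:{2}/{1}?createDatabaseIfNotExist=true'.format(
    #     'c6401.ambari.apache.org', 'druid', '3306')
    #   -> 'jdbc:mysql://c6401.ambari.apache.org:3306/druid?createDatabaseIfNotExist=true'

    # Direct memory rule checked by validateDruidEnvConfigurations, e.g. with
    # 7 processing threads and an assumed 512 MB processing buffer:
    # required_mb = (7 + 1) * 512 = 4096, so a druid.broker.jvm.direct.memory
    # value below 4096 MB would be flagged.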

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py b/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
new file mode 100644
index 0000000..2988169
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/DRUID/test_druid.py
@@ -0,0 +1,647 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import os
+
+from stacks.utils.RMFTestCase import *
+
+from mock.mock import MagicMock, patch
+from resource_management.libraries import functions
+from resource_management.libraries.functions import format
+from resource_management.core.logger import Logger
+
+@patch("resource_management.libraries.Script.get_tmp_dir", new=MagicMock(return_value=('/var/lib/ambari-agent/tmp')))
+@patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.0.0.0-1234"))
+class TestDruid(RMFTestCase):
+  COMMON_SERVICES_PACKAGE_DIR = "DRUID/0.9.2/package"
+  STACK_VERSION = "2.6"
+  DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
+
+  def setUp(self):
+    Logger.logger = MagicMock()
+    self.testDirectory = os.path.dirname(os.path.abspath(__file__))
+    self.num_times_to_iterate = 3
+    self.wait_time = 1
+
+  def test_configure_overlord(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/overlord.py",
+                       classname="DruidOverlord",
+                       command="configure",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       config_overrides = { 'role' : 'DRUID_OVERLORD' },
+                       stack_version=self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-overlord')
+    self.assertNoMoreResources()
+
+  def test_start_overlord(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/overlord.py",
+                       classname="DruidOverlord",
+                       command="start",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_OVERLORD' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-overlord')
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-overlord/conf/druid-env.sh ; /usr/hdp/current/druid-overlord/bin/node.sh overlord start'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_overlord(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/overlord.py",
+                       classname="DruidOverlord",
+                       command="stop",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_OVERLORD' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-overlord/conf/druid-env.sh ; /usr/hdp/current/druid-overlord/bin/node.sh overlord stop'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_coordinator(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/coordinator.py",
+                       classname="DruidCoordinator",
+                       command="configure",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       config_overrides = { 'role' : 'DRUID_COORDINATOR' },
+                       stack_version=self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-coordinator')
+    self.assertNoMoreResources()
+
+  def test_start_coordinator(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/coordinator.py",
+                       classname="DruidCoordinator",
+                       command="start",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_COORDINATOR' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-coordinator')
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-coordinator/conf/druid-env.sh ; /usr/hdp/current/druid-coordinator/bin/node.sh coordinator start'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_coordinator(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/coordinator.py",
+                       classname="DruidCoordinator",
+                       command="stop",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_COORDINATOR' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-coordinator/conf/druid-env.sh ; /usr/hdp/current/druid-coordinator/bin/node.sh coordinator stop'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_broker(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/broker.py",
+                       classname="DruidBroker",
+                       command="configure",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       config_overrides = { 'role' : 'DRUID_BROKER' },
+                       stack_version=self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-broker')
+    self.assertNoMoreResources()
+
+  def test_start_broker(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/broker.py",
+                       classname="DruidBroker",
+                       command="start",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_BROKER' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-broker')
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-broker/conf/druid-env.sh ; /usr/hdp/current/druid-broker/bin/node.sh broker start'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_broker(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/broker.py",
+                       classname="DruidBroker",
+                       command="stop",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_BROKER' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-broker/conf/druid-env.sh ; /usr/hdp/current/druid-broker/bin/node.sh broker stop'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_router(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/router.py",
+                       classname="DruidRouter",
+                       command="configure",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       config_overrides = { 'role' : 'DRUID_ROUTER' },
+                       stack_version=self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-router')
+    self.assertNoMoreResources()
+
+  def test_start_router(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/router.py",
+                       classname="DruidRouter",
+                       command="start",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_ROUTER' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-router')
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-router/conf/druid-env.sh ; /usr/hdp/current/druid-router/bin/node.sh router start'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_router(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/router.py",
+                       classname="DruidRouter",
+                       command="stop",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_ROUTER' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-router/conf/druid-env.sh ; /usr/hdp/current/druid-router/bin/node.sh router stop'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_historical(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historical.py",
+                       classname="DruidHistorical",
+                       command="configure",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       config_overrides = { 'role' : 'DRUID_HISTORICAL' },
+                       stack_version=self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-historical')
+    self.assertNoMoreResources()
+
+  def test_start_historical(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historical.py",
+                       classname="DruidHistorical",
+                       command="start",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_HISTORICAL' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-historical')
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-historical/conf/druid-env.sh ; /usr/hdp/current/druid-historical/bin/node.sh historical start'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_historical(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/historical.py",
+                       classname="DruidHistorical",
+                       command="stop",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_HISTORICAL' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-historical/conf/druid-env.sh ; /usr/hdp/current/druid-historical/bin/node.sh historical stop'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_configure_middleManager(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/middlemanager.py",
+                       classname="DruidMiddleManager",
+                       command="configure",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       config_overrides = { 'role' : 'DRUID_MIDDLEMANAGER' },
+                       stack_version=self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-middlemanager')
+    self.assertNoMoreResources()
+
+  def test_start_middleManager(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/middlemanager.py",
+                       classname="DruidMiddleManager",
+                       command="start",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_MIDDLEMANAGER' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assert_configure_default('druid-middlemanager')
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-middlemanager/conf/druid-env.sh ; /usr/hdp/current/druid-middlemanager/bin/node.sh middleManager start'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def test_stop_middleManager(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/middlemanager.py",
+                       classname="DruidMiddleManager",
+                       command="stop",
+                       config_file=self.get_src_folder() + "/test/python/stacks/2.6/configs/default.json",
+                       stack_version=self.STACK_VERSION,
+                       config_overrides = { 'role' : 'DRUID_MIDDLEMANAGER' },
+                       target=RMFTestCase.TARGET_COMMON_SERVICES
+                       )
+    self.assertResourceCalled('Execute', format('source /usr/hdp/current/druid-middlemanager/conf/druid-env.sh ; /usr/hdp/current/druid-middlemanager/bin/node.sh middleManager stop'),
+                              user='druid'
+                              )
+    self.assertNoMoreResources()
+
+  def assert_configure_default(self, role):
+
+    self.assertResourceCalled('Directory', '/var/log/druid',
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', '/var/run/druid',
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/_common'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/coordinator'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/broker'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/middleManager'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/historical'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/overlord'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/conf/router'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', '/apps/druid/segmentCache',
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('File', format('/usr/hdp/current/{role}/conf/druid-env.sh'),
+                              owner = 'druid',
+                              content = InlineTemplate(self.getConfig()['configurations']['druid-env']['content'])
+                              )
+    druid_common_config = mutable_config_dict(self.getConfig()['configurations']['druid-common'])
+    druid_common_config['druid.host'] = 'c6401.ambari.apache.org'
+    druid_common_config['druid.extensions.directory'] = format('/usr/hdp/current/{role}/extensions')
+    druid_common_config['druid.extensions.hadoopDependenciesDir'] = format('/usr/hdp/current/{role}/hadoop-dependencies')
+    druid_common_config['druid.selectors.indexing.serviceName'] = 'druid/overlord'
+    druid_common_config['druid.selectors.coordinator.serviceName'] = 'druid/coordinator'
+
+    self.assertResourceCalled('PropertiesFile', 'common.runtime.properties',
+                              dir=format("/usr/hdp/current/{role}/conf/_common"),
+                              properties=druid_common_config,
+                              owner='druid',
+                              group='hadoop'
+                              )
+
+    self.assertResourceCalled('File', format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml'),
+                              mode=0644,
+                              owner = 'druid',
+                              group = 'hadoop',
+                              content = InlineTemplate(self.getConfig()['configurations']['druid-log4j']['content'])
+                              )
+
+    self.assertResourceCalled('File', '/etc/logrotate.d/druid',
+                              mode=0644,
+                              owner = 'root',
+                              group = 'root',
+                              content = InlineTemplate(self.getConfig()['configurations']['druid-logrotate']['content'])
+                              )
+
+    self.assertResourceCalled('XmlConfig', "core-site.xml",
+                              conf_dir=format('/usr/hdp/current/{role}/conf/_common'),
+                              configurations=self.getConfig()['configurations']['core-site'],
+                              configuration_attributes=self.getConfig()['configuration_attributes']['core-site'],
+                              owner='druid',
+                              group='hadoop'
+                              )
+
+    self.assertResourceCalled('XmlConfig', "yarn-site.xml",
+                              conf_dir=format('/usr/hdp/current/{role}/conf/_common'),
+                              configurations=self.getConfig()['configurations']['yarn-site'],
+                              configuration_attributes=self.getConfig()['configuration_attributes']['yarn-site'],
+                              owner='druid',
+                              group='hadoop'
+                              )
+
+    self.assertResourceCalled('XmlConfig', "hdfs-site.xml",
+                              conf_dir=format('/usr/hdp/current/{role}/conf/_common'),
+                              configurations=self.getConfig()['configurations']['hdfs-site'],
+                              configuration_attributes=self.getConfig()['configuration_attributes']['hdfs-site'],
+                              owner='druid',
+                              group='hadoop'
+                              )
+
+    self.assertResourceCalled('PropertiesFile', "runtime.properties",
+                              dir=format('/usr/hdp/current/{role}/conf/coordinator'),
+                              properties=self.getConfig()['configurations']['druid-coordinator'],
+                              owner='druid',
+                              group='hadoop'
+                              )
+
+    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/coordinator/jvm.config"),
+                              owner='druid',
+                              group='hadoop',
+                              content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+                                                     node_heap_memory=1024,
+                                                     node_direct_memory=2048,
+                                                     node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
+                                                     log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
+                                                     )
+                              )
+
+    self.assertResourceCalled('PropertiesFile', "runtime.properties",
+                              dir=format('/usr/hdp/current/{role}/conf/overlord'),
+                              properties=self.getConfig()['configurations']['druid-overlord'],
+                              owner='druid',
+                              group='hadoop'
+                              )
+
+    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/overlord/jvm.config"),
+                              owner='druid',
+                              group='hadoop',
+                              content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+                                                     node_heap_memory=1024,
+                                                     node_direct_memory=2048,
+                                                     node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
+                                                     log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
+                                                     )
+                              )
+
+    self.assertResourceCalled('PropertiesFile', "runtime.properties",
+                              dir=format('/usr/hdp/current/{role}/conf/historical'),
+                              properties=self.getConfig()['configurations']['druid-historical'],
+                              owner='druid',
+                              group='hadoop'
+                              )
+
+    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/historical/jvm.config"),
+                            owner='druid',
+                            group='hadoop',
+                            content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+                                                   node_heap_memory=1024,
+                                                   node_direct_memory=2048,
+                                                   node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
+                                                   log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
+                                                   )
+                            )
+
+
+    self.assertResourceCalled('PropertiesFile', "runtime.properties",
+                          dir=format('/usr/hdp/current/{role}/conf/broker'),
+                          properties=self.getConfig()['configurations']['druid-broker'],
+                          owner='druid',
+                          group='hadoop'
+                          )
+
+    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/broker/jvm.config"),
+                          owner='druid',
+                          group='hadoop',
+                          content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+                                                 node_heap_memory=1024,
+                                                 node_direct_memory=2048,
+                                                 node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
+                                                 log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
+                                                 )
+                          )
+
+
+    self.assertResourceCalled('PropertiesFile', "runtime.properties",
+                          dir=format('/usr/hdp/current/{role}/conf/middleManager'),
+                          properties=self.getConfig()['configurations']['druid-middlemanager'],
+                          owner='druid',
+                          group='hadoop'
+                          )
+
+    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/middleManager/jvm.config"),
+                          owner='druid',
+                          group='hadoop',
+                          content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+                                                 node_heap_memory=1024,
+                                                 node_direct_memory=2048,
+                                                 node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
+                                                 log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
+                                                 )
+                          )
+
+    self.assertResourceCalled('PropertiesFile', "runtime.properties",
+                              dir=format('/usr/hdp/current/{role}/conf/router'),
+                              properties=self.getConfig()['configurations']['druid-router'],
+                              owner='druid',
+                              group='hadoop'
+                              )
+
+    self.assertResourceCalled('File', format("/usr/hdp/current/{role}/conf/router/jvm.config"),
+                              owner='druid',
+                              group='hadoop',
+                              content=InlineTemplate("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+                                                     node_heap_memory=1024,
+                                                     node_direct_memory=2048,
+                                                     node_jvm_opts='-Duser.timezone=UTC -Dfile.encoding=UTF-8',
+                                                     log4j_config_file=format('/usr/hdp/current/{role}/conf/_common/druid-log4j.xml')
+                                                     )
+                              )
+
+    self.assertResourceCalled('HdfsResource', '/user/druid',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              hdfs_site = {u'a': u'b'},
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = 'missing_principal',
+                              user = 'hdfs',
+                              owner = 'druid',
+                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              dfs_type = '',
+                              recursive_chown=True,
+                              recursive_chmod=True
+                              )
+
+    self.assertResourceCalled('HdfsResource', '/user/druid/data',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              hdfs_site = {u'a': u'b'},
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = 'missing_principal',
+                              user = 'hdfs',
+                              owner = 'druid',
+                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              dfs_type = '',
+                              mode=0755
+                              )
+
+    self.assertResourceCalled('HdfsResource', '/tmp/druid-indexing',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              hdfs_site = {u'a': u'b'},
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = 'missing_principal',
+                              user = 'hdfs',
+                              owner = 'druid',
+                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              dfs_type = '',
+                              mode=0755
+                              )
+
+    self.assertResourceCalled('HdfsResource', '/user/druid/logs',
+                              immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
+                              security_enabled = False,
+                              hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
+                              keytab = UnknownConfigurationMock(),
+                              default_fs = 'hdfs://c6401.ambari.apache.org:8020',
+                              hdfs_site = {u'a': u'b'},
+                              kinit_path_local = '/usr/bin/kinit',
+                              principal_name = 'missing_principal',
+                              user = 'hdfs',
+                              owner = 'druid',
+                              hadoop_conf_dir = '/usr/hdp/current/hadoop-client/conf',
+                              type = 'directory',
+                              action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
+                              dfs_type = '',
+                              mode=0755
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/extensions'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Directory', format('/usr/hdp/current/{role}/hadoop-dependencies'),
+                              mode=0755,
+                              cd_access='a',
+                              owner='druid',
+                              group='hadoop',
+                              create_parents=True,
+                              recursive_ownership=True
+                              )
+
+    self.assertResourceCalled('Execute', format("source /usr/hdp/current/{role}/conf/druid-env.sh ; java -classpath '/usr/hdp/current/{role}/lib/*' -Ddruid.extensions.loadList=[] -Ddruid.extensions.directory=/usr/hdp/current/{role}/extensions -Ddruid.extensions.hadoopDependenciesDir=/usr/hdp/current/{role}/hadoop-dependencies io.druid.cli.Main tools pull-deps -c mysql-metadata-storage --no-default-hadoop"),
+                              user='druid'
+                              )
+
+
+def mutable_config_dict(config):
+  rv = {}
+  for key, value in config.iteritems():
+    rv[key] = value
+  return rv
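
The jvm.config assertions above all render the same inline template. The snippet below is a minimal, self-contained Python sketch of that substitution using the heap, direct-memory, and JVM-opts values from the test; the render_jvm_config helper and the druid-broker path are illustrative stand-ins rather than Ambari's InlineTemplate implementation.

# Illustrative sketch: emulate the {{...}} substitution the jvm.config
# assertions expect; Ambari's InlineTemplate resource does the real rendering.
JVM_CONFIG_TEMPLATE = ("-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n"
                       "-XX:MaxDirectMemorySize={{node_direct_memory}}m \n"
                       "-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}")

def render_jvm_config(template, **params):
    # Naive {{key}} replacement; good enough for this flat template.
    for key, value in params.items():
        template = template.replace("{{%s}}" % key, str(value))
    return template

print(render_jvm_config(
    JVM_CONFIG_TEMPLATE,
    node_heap_memory=1024,
    node_direct_memory=2048,
    node_jvm_opts="-Duser.timezone=UTC -Dfile.encoding=UTF-8",
    log4j_config_file="/usr/hdp/current/druid-broker/conf/_common/druid-log4j.xml",  # example role path
))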

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
new file mode 100644
index 0000000..8227d69
--- /dev/null
+++ b/ambari-server/src/test/python/stacks/2.6/common/test_stack_advisor.py
@@ -0,0 +1,575 @@
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import json
+import os
+from unittest import TestCase
+from mock.mock import patch
+
+
+class TestHDP26StackAdvisor(TestCase):
+  def setUp(self):
+    import imp
+    self.maxDiff = None
+    self.testDirectory = os.path.dirname(os.path.abspath(__file__))
+    stackAdvisorPath = os.path.join(self.testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
+    hdp206StackAdvisorPath = os.path.join(self.testDirectory,
+                                          '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
+    hdp21StackAdvisorPath = os.path.join(self.testDirectory,
+                                         '../../../../../main/resources/stacks/HDP/2.1/services/stack_advisor.py')
+    hdp22StackAdvisorPath = os.path.join(self.testDirectory,
+                                         '../../../../../main/resources/stacks/HDP/2.2/services/stack_advisor.py')
+    hdp23StackAdvisorPath = os.path.join(self.testDirectory,
+                                         '../../../../../main/resources/stacks/HDP/2.3/services/stack_advisor.py')
+    hdp24StackAdvisorPath = os.path.join(self.testDirectory,
+                                         '../../../../../main/resources/stacks/HDP/2.4/services/stack_advisor.py')
+    hdp25StackAdvisorPath = os.path.join(self.testDirectory,
+                                         '../../../../../main/resources/stacks/HDP/2.5/services/stack_advisor.py')
+    hdp26StackAdvisorPath = os.path.join(self.testDirectory,
+                                         '../../../../../main/resources/stacks/HDP/2.6/services/stack_advisor.py')
+    hdp26StackAdvisorClassName = 'HDP26StackAdvisor'
+
+    with open(stackAdvisorPath, 'rb') as fp:
+      imp.load_module('stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+    with open(hdp206StackAdvisorPath, 'rb') as fp:
+      imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+    with open(hdp21StackAdvisorPath, 'rb') as fp:
+      imp.load_module('stack_advisor_impl', fp, hdp21StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+    with open(hdp22StackAdvisorPath, 'rb') as fp:
+      imp.load_module('stack_advisor_impl', fp, hdp22StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+    with open(hdp23StackAdvisorPath, 'rb') as fp:
+      imp.load_module('stack_advisor_impl', fp, hdp23StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+    with open(hdp24StackAdvisorPath, 'rb') as fp:
+      imp.load_module('stack_advisor_impl', fp, hdp24StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+    with open(hdp25StackAdvisorPath, 'rb') as fp:
+      imp.load_module('stack_advisor_impl', fp, hdp25StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
+    with open(hdp26StackAdvisorPath, 'rb') as fp:
+      stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp26StackAdvisorPath,
+                                           ('.py', 'rb', imp.PY_SOURCE))
+    clazz = getattr(stack_advisor_impl, hdp26StackAdvisorClassName)
+    self.stackAdvisor = clazz()
+
+    # substitute method in the instance
+    self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
+    self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
+
+  @patch('__builtin__.open')
+  @patch('os.path.exists')
+  def get_system_min_uid_magic(self, exists_mock, open_mock):
+    class MagicFile(object):
+      def read(self):
+        return """
+          #test line UID_MIN 200
+          UID_MIN 500
+          """
+
+      def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
+
+      def __enter__(self):
+        return self
+
+    exists_mock.return_value = True
+    open_mock.return_value = MagicFile()
+    return self.get_system_min_uid_real()
+
+  def test_recommendDruidConfigurations_withMysql(self):
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          },
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [{
+        "StackServices": {
+          "service_name": "DRUID",
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_COORDINATOR",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_OVERLORD",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_BROKER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_HISTORICAL",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_MIDDLEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          }
+        ]
+      }
+      ],
+      "configurations": {
+        "druid-common": {
+          "properties": {
+            "database_name": "druid",
+            "metastore_hostname": "c6401.ambari.apache.org",
+            "druid.metadata.storage.type": "mysql",
+            "druid.extensions.loadList": "[\"postgresql-metadata-storage\"]",
+            "druid.extensions.pullList": "[]"
+          }
+        }
+      }
+    }
+
+    clusterData = {
+      "cpu": 4,
+      "mapMemory": 30000,
+      "amMemory": 20000,
+      "reduceMemory": 20560,
+      "containers": 30,
+      "ramPerContainer": 512,
+      "referenceNodeManagerHost": {
+        "total_mem": 10240 * 1024
+      }
+    }
+
+    configurations = {
+    }
+
+    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,
+                      {'druid-historical': {
+                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'druid-broker': {
+                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'druid-common': {'properties': {'druid.extensions.loadList': '["mysql-metadata-storage"]',
+                                                        'druid.metadata.storage.connector.port': '3306',
+                                                        'druid.metadata.storage.connector.connectURI': 'jdbc:mysql://c6401.ambari.apache.org:3306/druid?createDatabaseIfNotExist=true',
+                                                        'druid.zk.service.host': '',
+                                                        'druid.extensions.pullList': '["io.druid.extensions:mysql-metadata-storage"]'}},
+                        'druid-env': {'properties': {},
+                                      'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.overlord.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.middlemanager.jvm.heap.memory': {
+                                                                'maximum': '49152'},
+                                                              'druid.historical.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.broker.jvm.heap.memory': {'maximum': '49152'}}}}
+                      )
+
+  def test_recommendDruidConfigurations_WithPostgresql(self):
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          }
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [{
+        "StackServices": {
+          "service_name": "DRUID",
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_COORDINATOR",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_OVERLORD",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_BROKER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_HISTORICAL",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_MIDDLEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          }
+        ]
+      }
+      ],
+      "configurations": {
+        "druid-common": {
+          "properties": {
+            "database_name": "druid",
+            "metastore_hostname": "c6401.ambari.apache.org",
+            "druid.metadata.storage.type": "postgres",
+            "druid.extensions.loadList": "[\"mysql-metadata-storage\"]",
+            "druid.extensions.pullList": "[]"
+          }
+        }
+      }
+    }
+
+    clusterData = {
+    }
+
+    configurations = {
+    }
+
+    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,
+                      {'druid-historical': {
+                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'druid-broker': {
+                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'druid-common': {'properties': {'druid.extensions.loadList': '["postgresql-metadata-storage"]',
+                                                        'druid.metadata.storage.connector.port': '5432',
+                                                        'druid.metadata.storage.connector.connectURI': 'jdbc:postgresql://c6401.ambari.apache.org:5432/druid',
+                                                        'druid.zk.service.host': '',
+                                                        'druid.extensions.pullList': '[]'}},
+                        'druid-env': {'properties': {},
+                                      'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.overlord.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.middlemanager.jvm.heap.memory': {
+                                                                'maximum': '49152'},
+                                                              'druid.historical.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.broker.jvm.heap.memory': {'maximum': '49152'}}}}
+                      )
+
+  def test_recommendDruidConfigurations_WithDerby(self):
+    hosts = {
+      "items": [
+        {
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          }
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [{
+        "StackServices": {
+          "service_name": "DRUID",
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_COORDINATOR",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_OVERLORD",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_BROKER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_HISTORICAL",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_MIDDLEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          }
+        ]
+      }
+      ],
+      "configurations": {
+        "druid-common": {
+          "properties": {
+            "database_name": "druid",
+            "metastore_hostname": "c6401.ambari.apache.org",
+            "druid.metadata.storage.type": "derby",
+            "druid.extensions.loadList": "[\"mysql-metadata-storage\"]",
+            "druid.extensions.pullList": "[]"
+          }
+        }
+      }
+    }
+
+    clusterData = {
+    }
+
+    configurations = {
+    }
+
+    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,
+                      {'druid-historical': {
+                        'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'druid-broker': {
+                          'properties': {'druid.processing.numThreads': '3', 'druid.server.http.numThreads': '40'}},
+                        'druid-common': {'properties': {'druid.extensions.loadList': '[]',
+                                                        'druid.metadata.storage.connector.port': '1527',
+                                                        'druid.metadata.storage.connector.connectURI': 'jdbc:derby://c6401.ambari.apache.org:1527/druid;create=true',
+                                                        'druid.zk.service.host': '',
+                                                        'druid.extensions.pullList': '[]'}},
+                        'druid-env': {'properties': {},
+                                      'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.overlord.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.middlemanager.jvm.heap.memory': {
+                                                                'maximum': '49152'},
+                                                              'druid.historical.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.broker.jvm.heap.memory': {'maximum': '49152'}}}}
+                      )
+
+  def test_recommendDruidConfigurations_heterogeneous_hosts(self):
+    hosts = {
+      "items": [
+        {
+          "href": "/api/v1/hosts/c6401.ambari.apache.org",
+          "Hosts": {
+            "cpu_count": 4,
+            "total_mem": 50331648,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6401.ambari.apache.org",
+            "host_name": "c6401.ambari.apache.org"
+          }
+        }, {
+          "href": "/api/v1/hosts/c6402.ambari.apache.org",
+          "Hosts": {
+            "cpu_count": 1,
+            "total_mem": 1922680,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6402.ambari.apache.org",
+            "host_name": "c6402.ambari.apache.org"
+          }
+        },
+        {
+          "href": "/api/v1/hosts/c6403.ambari.apache.org",
+          "Hosts": {
+            "cpu_count": 3,
+            "total_mem": 3845360,
+            "disk_info": [
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"},
+              {"mountpoint": "/"},
+              {"mountpoint": "/dev/shm"},
+              {"mountpoint": "/vagrant"}
+            ],
+            "public_host_name": "c6403.ambari.apache.org",
+            "host_name": "c6403.ambari.apache.org"
+          }
+        }
+      ]
+    }
+
+    services = {
+      "Versions": {
+        "parent_stack_version": "2.5",
+        "stack_name": "HDP",
+        "stack_version": "2.6",
+        "stack_hierarchy": {
+          "stack_name": "HDP",
+          "stack_versions": ["2.5", "2.4", "2.3", "2.2", "2.1", "2.0.6"]
+        }
+      },
+      "services": [{
+        "StackServices": {
+          "service_name": "DRUID",
+        },
+        "components": [
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_COORDINATOR",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_OVERLORD",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_BROKER",
+              "hostnames": ["c6402.ambari.apache.org", "c6403.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_HISTORICAL",
+              "hostnames": ["c6401.ambari.apache.org", "c6403.ambari.apache.org"]
+            },
+          },
+          {
+            "StackServiceComponents": {
+              "component_name": "DRUID_MIDDLEMANAGER",
+              "hostnames": ["c6401.ambari.apache.org"]
+            },
+          }
+        ]
+      }
+      ],
+      "configurations": {
+        "druid-common": {
+          "properties": {
+            "database_name": "druid",
+            "metastore_hostname": "c6401.ambari.apache.org",
+            "druid.metadata.storage.type": "derby",
+            "druid.extensions.loadList": "[\"mysql-metadata-storage\"]",
+            "druid.extensions.pullList": "[]"
+          }
+        }
+      }
+    }
+
+    clusterData = {
+    }
+
+    configurations = {
+    }
+
+    self.stackAdvisor.recommendDruidConfigurations(configurations, clusterData, services, hosts)
+    self.assertEquals(configurations,
+                      {'druid-historical': {
+                        'properties': {'druid.processing.numThreads': '2', 'druid.server.http.numThreads': '40'}},
+                        'druid-broker': {
+                          'properties': {'druid.processing.numThreads': '1', 'druid.server.http.numThreads': '40'}},
+                        'druid-common': {'properties': {'druid.extensions.loadList': '[]',
+                                                        'druid.metadata.storage.connector.port': '1527',
+                                                        'druid.metadata.storage.connector.connectURI': 'jdbc:derby://c6401.ambari.apache.org:1527/druid;create=true',
+                                                        'druid.zk.service.host': '',
+                                                        'druid.extensions.pullList': '[]'}},
+                        'druid-env': {'properties': {},
+                                      'property_attributes': {'druid.coordinator.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.overlord.jvm.heap.memory': {'maximum': '49152'},
+                                                              'druid.middlemanager.jvm.heap.memory': {
+                                                                'maximum': '49152'},
+                                                              'druid.historical.jvm.heap.memory': {'maximum': '3755'},
+                                                              'druid.broker.jvm.heap.memory': {'maximum': '1877'}}}}
+                      )
+
+
+def load_json(self, filename):
+  file = os.path.join(self.testDirectory, filename)
+  with open(file, 'rb') as f:
+    data = json.load(f)
+  return data
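
The druid-env property_attributes asserted in the tests above follow a simple pattern: each host reports total_mem in KB, and the recommended heap maximum for a component is the smallest such value among the hosts running it, converted to MB (50331648 KB -> 49152 MB, 3845360 KB -> 3755 MB, 1922680 KB -> 1877 MB). The sketch below shows that arithmetic; it is inferred from the expected test values only, and the authoritative logic lives in the HDP 2.6 stack_advisor.py.

# Sketch of the heap-maximum arithmetic implied by the expected values above:
# hosts report total_mem in KB, and the cap for a component is taken from the
# smallest host running it, converted to MB. Inferred from the test data only.
def heap_maximum_mb(component_host_mem_kb):
    return str(min(component_host_mem_kb) // 1024)

# Heterogeneous-host test: c6401=50331648 KB, c6402=1922680 KB, c6403=3845360 KB
print(heap_maximum_mb([50331648, 3845360]))   # historical on c6401 + c6403 -> '3755'
print(heap_maximum_mb([1922680, 3845360]))    # broker on c6402 + c6403     -> '1877'
print(heap_maximum_mb([50331648]))            # coordinator on c6401        -> '49152'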


[3/3] ambari git commit: Integrate Druid with Ambari (Nishant Bangarwa, Slim Bouguerra via Swapan Shridhar).

Posted by sw...@apache.org.
Integrate Druid with Ambari (Nishant Bangarwa, Slim Bouguerra via Swapan Shridhar).


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/685e926d
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/685e926d
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/685e926d

Branch: refs/heads/trunk
Commit: 685e926db0d442411ab5bee50c1e189cfb0a261e
Parents: 563b41d
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Thu Oct 20 12:04:24 2016 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Thu Oct 20 12:04:24 2016 -0700

----------------------------------------------------------------------
 .../DRUID/0.9.2/configuration/druid-broker.xml  | 100 +++
 .../DRUID/0.9.2/configuration/druid-common.xml  | 205 ++++++
 .../0.9.2/configuration/druid-coordinator.xml   |  43 ++
 .../DRUID/0.9.2/configuration/druid-env.xml     | 241 +++++++
 .../0.9.2/configuration/druid-historical.xml    |  88 +++
 .../DRUID/0.9.2/configuration/druid-log4j.xml   |  84 +++
 .../0.9.2/configuration/druid-logrotate.xml     |  46 ++
 .../0.9.2/configuration/druid-middlemanager.xml | 104 +++
 .../0.9.2/configuration/druid-overlord.xml      |  52 ++
 .../DRUID/0.9.2/configuration/druid-router.xml  |  59 ++
 .../common-services/DRUID/0.9.2/metainfo.xml    | 249 +++++++
 .../DRUID/0.9.2/package/scripts/broker.py       |  28 +
 .../DRUID/0.9.2/package/scripts/coordinator.py  |  28 +
 .../DRUID/0.9.2/package/scripts/druid.py        | 259 ++++++++
 .../DRUID/0.9.2/package/scripts/druid_node.py   |  85 +++
 .../DRUID/0.9.2/package/scripts/historical.py   |  28 +
 .../0.9.2/package/scripts/middlemanager.py      |  28 +
 .../DRUID/0.9.2/package/scripts/overlord.py     |  28 +
 .../DRUID/0.9.2/package/scripts/params.py       | 129 ++++
 .../DRUID/0.9.2/package/scripts/router.py       |  28 +
 .../0.9.2/package/scripts/service_check.py      |  44 ++
 .../0.9.2/package/scripts/status_params.py      |  24 +
 .../DRUID/0.9.2/quicklinks/quicklinks.json      |  37 ++
 .../DRUID/0.9.2/themes/theme.json               | 120 ++++
 .../stacks/HDP/2.6/role_command_order.json      |  19 +
 .../stacks/HDP/2.6/services/DRUID/kerberos.json |  78 +++
 .../stacks/HDP/2.6/services/DRUID/metainfo.xml  |  27 +
 .../stacks/HDP/2.6/services/stack_advisor.py    | 187 +++++-
 .../test/python/stacks/2.6/DRUID/test_druid.py  | 647 +++++++++++++++++++
 .../stacks/2.6/common/test_stack_advisor.py     | 575 ++++++++++++++++
 .../test/python/stacks/2.6/configs/default.json | 552 ++++++++++++++++
 31 files changed, 4221 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
new file mode 100644
index 0000000..4f05da0
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-broker.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/broker</value>
+    <description>The druid.service name of the broker node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8082</value>
+    <description>The port on which the broker will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.http.numConnections</name>
+    <value>20</value>
+    <description>Size of the connection pool for the Broker to connect to historical and real-time nodes. If more
+      queries than this number need to speak to the same node at once, they will queue up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>1073741824</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
+      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
+      computations
+      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
+      require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>2</value>
+    <description>The number of processing threads to have available for parallel processing of segments.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.cache.useCache</name>
+    <value>true</value>
+    <description>Enable the cache on the broker.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.broker.cache.populateCache</name>
+    <value>true</value>
+    <description>Populate the cache on the broker.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.cache.type</name>
+    <value>local</value>
+    <description>The type of cache to use for queries.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.cache.sizeInBytes</name>
+    <value>10000000</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>Maximum cache size in bytes. Zero disables caching.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
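
These broker settings are written as-is into the broker's runtime.properties by Ambari's PropertiesFile resource, as asserted in test_druid.py earlier in this commit. Below is a minimal sketch of that serialization using a few of the defaults above as sample data; the write_properties helper is illustrative, not the resource's actual implementation.

# Illustrative sketch: what a runtime.properties write looks like for a few of
# the broker defaults above; Ambari's PropertiesFile resource does the real work.
def write_properties(path, properties):
    with open(path, 'w') as out:
        for key in sorted(properties):
            out.write('%s=%s\n' % (key, properties[key]))

write_properties('runtime.properties', {
    'druid.service': 'druid/broker',
    'druid.port': '8082',
    'druid.broker.http.numConnections': '20',
    'druid.processing.numThreads': '2',
    'druid.processing.buffer.sizeBytes': '1073741824',
})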

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
new file mode 100644
index 0000000..b6571c3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-common.xml
@@ -0,0 +1,205 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.extensions.pullList</name>
+    <value>[]</value>
+    <description>A comma-separated list of one or more Druid extensions to download from Maven.</description>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.extensions.loadList</name>
+    <value>["druid-datasketches"]
+    </value>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <description>A comma-separated list of one or more Druid extensions to load.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.zk.service.host</name>
+    <value>localhost:2181</value>
+    <description>
+      ZooKeeper connection string.
+    </description>
+  </property>
+  <property>
+    <name>druid.zk.paths.base</name>
+    <value>/druid</value>
+    <description>
+      Base Zookeeper path
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.discovery.curator.path</name>
+    <value>/druid/discovery</value>
+    <description>
+      Services announce themselves under this ZooKeeper path.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.storage.type</name>
+    <value></value>
+    <description>
+      Choices: local, noop, s3, hdfs, c*. The type of deep storage to use.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.storage.storageDirectory</name>
+    <value></value>
+    <description>
+      Directory to use as deep storage.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.password</name>
+    <value></value>
+    <property-type>PASSWORD</property-type>
+    <display-name>Metadata storage password</display-name>
+    <description>Password for the metadata storage database.</description>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.user</name>
+    <value>druid</value>
+    <display-name>Metadata storage user</display-name>
+    <description>Metadata storage user</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.metadata.storage.connector.port</name>
+    <value>1527</value>
+    <display-name>Metadata storage port</display-name>
+    <description>Metadata storage port</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>database_name</name>
+    <value>druid</value>
+    <display-name>Metadata storage database name</display-name>
+    <description>Metadata storage database name</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metastore_hostname</name>
+    <value>localhost</value>
+    <display-name>Metadata storage hostname</display-name>
+    <description>Metadata storage hostname</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>druid.metadata.storage.type</name>
+    <display-name>Metadata storage type</display-name>
+    <value>derby</value>
+    <value-attributes>
+      <overridable>false</overridable>
+      <type>value-list</type>
+      <entries>
+        <entry>
+          <value>mysql</value>
+          <label>MYSQL</label>
+        </entry>
+        <entry>
+          <value>derby</value>
+          <label>DERBY</label>
+        </entry>
+        <entry>
+          <value>postgres</value>
+          <label>POSTGRES</label>
+        </entry>
+      </entries>
+    </value-attributes>
+    <description>Type of the metadata storage. Note that derby will work only if all the Druid nodes are located
+      on the same host. Use mysql or postgres for distributed mode.
+      The mysql instance installed by Ambari is intended for development only and is not suitable for production use cases because it is not HA.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property require-input="true">
+    <name>druid.metadata.storage.connector.connectURI</name>
+    <value>jdbc:derby://localhost:1527/druid;create=true</value>
+    <display-name>Metadata storage connector url</display-name>
+    <description>Metadata storage connector url</description>
+    <on-ambari-upgrade add="false"/>
+    <depends-on>
+      <property>
+        <type>druid-common</type>
+        <name>database_name</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>metastore_hostname</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.type</name>
+      </property>
+      <property>
+        <type>druid-common</type>
+        <name>druid.metadata.storage.connector.port</name>
+      </property>
+    </depends-on>
+  </property>
+  <property>
+    <name>druid.hadoop.security.kerberos.principal</name>
+    <display-name>Kerberos principal</display-name>
+    <description>Kerberos principal, e.g. druid@EXAMPLE.COM</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.hadoop.security.kerberos.keytab</name>
+    <display-name>Kerberos keytab location</display-name>
+    <description>Kerberos keytab location</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>
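
The depends-on blocks above tie the connector URI, port, and extension lists to the selected druid.metadata.storage.type. The sketch below reproduces the mapping that the stack advisor tests in this commit expect for mysql, postgres, and derby; it is an approximation for illustration, and the real recommendation code lives in the HDP 2.6 stack_advisor.py.

# Approximation of the storage-type-dependent defaults asserted in
# test_stack_advisor.py earlier; the real logic is in the HDP 2.6 stack advisor.
def metadata_storage_defaults(storage_type, host, database):
    if storage_type == 'mysql':
        return {'port': '3306',
                'connectURI': 'jdbc:mysql://%s:3306/%s?createDatabaseIfNotExist=true' % (host, database),
                'pullList': '["io.druid.extensions:mysql-metadata-storage"]',
                'loadList': '["mysql-metadata-storage"]'}
    if storage_type == 'postgres':
        return {'port': '5432',
                'connectURI': 'jdbc:postgresql://%s:5432/%s' % (host, database),
                'pullList': '[]',
                'loadList': '["postgresql-metadata-storage"]'}
    # derby: single-host only, nothing to pull or load
    return {'port': '1527',
            'connectURI': 'jdbc:derby://%s:1527/%s;create=true' % (host, database),
            'pullList': '[]',
            'loadList': '[]'}

print(metadata_storage_defaults('mysql', 'c6401.ambari.apache.org', 'druid'))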

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
new file mode 100644
index 0000000..618f11d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-coordinator.xml
@@ -0,0 +1,43 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/coordinator</value>
+    <description>The druid.service name of the coordinator node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8081</value>
+    <description>The port on which the coordinator will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.merge.on</name>
+    <value>false</value>
+    <description>Boolean flag for whether or not the coordinator should try to merge small segments into a more optimal
+      segment size.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
new file mode 100644
index 0000000..29d2b96
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-env.xml
@@ -0,0 +1,241 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <!--Heap Settings -->
+  <property>
+    <name>druid.broker.jvm.heap.memory</name>
+    <value>2048</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.heap.memory</name>
+    <value>256</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.heap.memory</name>
+    <value>2048</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.heap.memory</name>
+    <value>512</value>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>65536</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- DirectMemorySettings -->
+  <property>
+    <name>druid.broker.jvm.direct.memory</name>
+    <value>1048576</value>
+    <depends-on>
+      <property>
+        <type>druid-broker</type>
+        <name>druid.processing.buffer.sizeBytes</name>
+      </property>
+      <property>
+        <type>druid-broker</type>
+        <name>druid.processing.numThreads</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.direct.memory</name>
+    <value>1048576</value>
+    <depends-on>
+      <property>
+        <type>druid-historical</type>
+        <name>druid.processing.buffer.sizeBytes</name>
+      </property>
+      <property>
+        <type>druid-historical</type>
+        <name>druid.processing.numThreads</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.direct.memory</name>
+    <value>1048576</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- JavaOpts Tune GC related configs here-->
+  <property>
+    <name>druid.broker.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.coordinator.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.middlemanager.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.historical.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.overlord.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.jvm.opts</name>
+    <value>-Duser.timezone=UTC -Dfile.encoding=UTF-8</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_user</name>
+    <display-name>Druid User</display-name>
+    <value>druid</value>
+    <property-type>USER</property-type>
+    <description>Druid service user.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_log_dir</name>
+    <value>/var/log/druid</value>
+    <description>Directory for Druid log files.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid_pid_dir</name>
+    <value>/var/run/druid</value>
+    <display-name>Druid PID dir</display-name>
+    <description>Directory in which Druid PID files are stored.</description>
+    <value-attributes>
+      <type>directory</type>
+      <editable-only-at-install>true</editable-only-at-install>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <!-- druid-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>druid-env template</display-name>
+    <description>A simple template for the druid-env.sh file.</description>
+    <value>
+      #!/bin/bash
+
+      # Set DRUID specific environment variables here.
+
+      # The java implementation to use.
+      export JAVA_HOME={{java8_home}}
+      export PATH=$PATH:$JAVA_HOME/bin
+      export DRUID_PID_DIR={{druid_pid_dir}}
+      export DRUID_LOG_DIR={{druid_log_dir}}
+      export DRUID_CONF_DIR={{druid_conf_dir}}
+      export DRUID_LIB_DIR={{druid_home}}/lib
+
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
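
Note: the per-node heap, direct-memory and JVM-opts values above are rendered into each node's jvm.config by the package scripts (see druid.py further below in this patch). As a sketch only, with the router defaults shown here (512 MB heap, 1048576 MB max direct memory) the rendered file would look roughly like the following; the log4j path is a placeholder for the common conf directory, not a literal value from this patch:

    -server
    -Xms512m
    -Xmx512m
    -XX:MaxDirectMemorySize=1048576m
    -Dlog4j.configurationFile=<druid_common_conf_dir>/druid-log4j.xml
    -Dlog4j.debug
    -Duser.timezone=UTC -Dfile.encoding=UTF-8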

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
new file mode 100644
index 0000000..0545e4c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-historical.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/historical</value>
+    <description>The druid.service name of the historical node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8083</value>
+    <description>The port on which the historical nodes will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>1073741824</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>This specifies a buffer size for the storage of intermediate results. The computation engine in both
+      the Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate
+      computations off-heap. Larger values allow for more aggregations in a single pass over the data while smaller
+      values can require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>10</value>
+    <description>The number of processing threads to have available for parallel processing of segments.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.segmentCache.locations</name>
+    <value>[{"path":"/apps/druid/segmentCache","maxSize"\:300000000000}]</value>
+    <description>Segments assigned to a Historical node are first stored on the local file system (in a disk cache) and
+      then served by the Historical node. These locations define where that local cache resides.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.maxSize</name>
+    <value>300000000000</value>
+    <description>The maximum number of bytes-worth of segments that the node wants assigned to it. This is not a limit
+      that Historical nodes actually enforce, just a value published to the Coordinator node so it can plan
+      accordingly.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.segmentCache.infoDir</name>
+    <value>/apps/druid/segmentCache</value>
+    <description>Historical nodes keep track of the segments they are serving so that when the process is restarted they
+      can reload the same segments without waiting for the Coordinator to reassign. This path defines where this
+      metadata is kept. Directory will be created if needed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
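
As a rough sizing sketch (general Druid guidance, not something enforced by this patch): a historical node's direct memory should be at least druid.processing.buffer.sizeBytes * (druid.processing.numThreads + 1). With the defaults above:

    1073741824 bytes * (10 + 1) ~= 11 GiB

which is why druid.historical.jvm.direct.memory in druid-env declares depends-on relationships to these two properties.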

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
new file mode 100644
index 0000000..bcb731a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-log4j.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>druid_log_level</name>
+    <value>info</value>
+    <description>Log level for io.druid logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>root_log_level</name>
+    <value>WARN</value>
+    <description>Log level for root logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>metamx_log_level</name>
+    <value>info</value>
+    <description>Log level for com.metamx logging</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>druid-log4j template</display-name>
+    <description>Custom log4j.properties</description>
+    <value><![CDATA[<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one
+  ~ or more contributor license agreements.  See the NOTICE file
+  ~ distributed with this work for additional information
+  ~ regarding copyright ownership.  The ASF licenses this file
+  ~ to you under the Apache License, Version 2.0 (the
+  ~ "License"); you may not use this file except in compliance
+  ~ with the License.  You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+    <Configuration>
+        <Appenders>
+            <Console name="Console" target="SYSTEM_OUT">
+                <PatternLayout pattern="%d{ISO8601} %p [%t] %c - %m%n"/>
+            </Console>
+        </Appenders>
+        <Loggers>
+            <Logger name="com.metamx" level="{{metamx_log_level}}"/>
+            <Logger name="io.druid" level="{{druid_log_level}}"/>
+            <Root level="{{root_log_level}}">
+                <AppenderRef ref="Console"/>
+            </Root>
+        </Loggers>
+    </Configuration>
+      ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
new file mode 100644
index 0000000..f612766
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-logrotate.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>content</name>
+    <display-name>druid logrotate template</display-name>
+    <description>Custom logrotate file</description>
+    <value><![CDATA[
+    {{druid_log_dir}}/*.log {
+        copytruncate
+        rotate 7
+        daily
+        nocompress
+        missingok
+        notifempty
+        create 660 druid users
+        dateext
+        dateformat -%Y-%m-%d-%s
+        }
+      ]]></value>
+    <value-attributes>
+      <type>content</type>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
new file mode 100644
index 0000000..967438e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-middlemanager.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/middlemanager</value>
+    <description>The druid.service name of the middlemanager node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8091</value>
+    <description>The port on which the middlemanager nodes will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.startPort</name>
+    <value>8100</value>
+    <description>The port that peons begin running on.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.worker.capacity</name>
+    <value>3</value>
+    <description>
+      Maximum number of tasks the middlemanager can accept and run concurrently.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.javaOpts</name>
+    <value>-server -Xmx2g -Duser.timezone=UTC -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dhdp.version={{stack_version}} -Dhadoop.mapreduce.job.classloader=true</value>
+    <description>
+      A string of -X Java options to pass to the peon's JVM.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.task.baseTaskDir</name>
+    <value>/tmp/persistent/tasks</value>
+    <description>
+      Base temporary working directory for druid tasks.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>
+      Number of threads for HTTP requests.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.buffer.sizeBytes</name>
+    <value>256000000</value>
+    <value-attributes>
+      <type>long</type>
+      <minimum>0</minimum>
+      <unit>Bytes</unit>
+    </value-attributes>
+    <description>
+      This specifies a buffer size for the storage of intermediate results. The computation engine in both the
+      Historical and Realtime nodes will use a scratch buffer of this size to do all of their intermediate computations
+      off-heap. Larger values allow for more aggregations in a single pass over the data while smaller values can
+      require more passes depending on the query that is being executed.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.processing.numThreads</name>
+    <value>2</value>
+    <description>
+      The number of processing threads to have available for parallel processing of segments.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.task.hadoopWorkingPath</name>
+    <value>/tmp/druid-indexing</value>
+    <description>
+      Temporary working directory for Hadoop tasks.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
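
One illustrative consequence of the defaults above (an inference, not a value set anywhere in this patch): each task slot launches its own peon JVM starting at druid.indexer.runner.startPort, so

    startPort = 8100, worker.capacity = 3  ->  peons on ports 8100, 8101, 8102

with each peon started using the druid.indexer.runner.javaOpts shown.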

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
new file mode 100644
index 0000000..57d1c63
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-overlord.xml
@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/overlord</value>
+    <description>The druid.service name of the overlord node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8090</value>
+    <description>The port on which the overlord will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.runner.type</name>
+    <value>remote</value>
+    <description>Choices are "local" or "remote". Indicates whether tasks should be run locally or in a distributed
+      environment.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.indexer.storage.type</name>
+    <value>metadata</value>
+    <description>Choices are "local" or "metadata". Indicates whether incoming tasks should be stored locally (in heap)
+      or in metadata storage. Storing incoming tasks in metadata storage allows for tasks to be resumed if the overlord
+      should fail.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
new file mode 100644
index 0000000..bfb0d21
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/configuration/druid-router.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>druid.service</name>
+    <value>druid/router</value>
+    <description>The druid.service name of the router node.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.port</name>
+    <value>8888</value>
+    <description>The port on which the router will accept connections.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.http.numConnections</name>
+    <value>20</value>
+    <description>
+      Size of connection pool for the router to connect to historical and real-time nodes. If there are more
+      queries than this number that all need to speak to the same node, then they will queue up.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.server.http.numThreads</name>
+    <value>50</value>
+    <description>Number of threads for HTTP requests.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>druid.router.tierToBrokerMap</name>
+    <value>{}</value>
+    <description>
+      Used to route queries for a certain tier of data to their appropriate broker. An ordered JSON map of
+      tiers to broker names. The priority of brokers is based on the ordering.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
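
For illustration only (the tier and service names below are placeholders, not values shipped by this patch), druid.router.tierToBrokerMap takes an ordered JSON map from tier name to a broker's druid.service name, e.g.:

    {"_default_tier": "druid/broker"}

With the empty map above, the router is expected to fall back to its default broker service for unmapped tiers.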

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
new file mode 100644
index 0000000..c897f12
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/metainfo.xml
@@ -0,0 +1,249 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>DRUID</name>
+      <displayName>Druid</displayName>
+      <comment>A fast column-oriented distributed data store.</comment>
+      <version>0.9.2</version>
+      <components>
+        <component>
+          <name>DRUID_COORDINATOR</name>
+          <displayName>Druid Coordinator</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/coordinator.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>DRUID_OVERLORD</name>
+          <displayName>Druid Overlord</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/overlord.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>DRUID_HISTORICAL</name>
+          <displayName>Druid Historical</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/historical.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>DRUID_BROKER</name>
+          <displayName>Druid Broker</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/broker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>DRUID_MIDDLEMANAGER</name>
+          <displayName>Druid MiddleManager</displayName>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/middlemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>DRUID_ROUTER</name>
+          <displayName>Druid Router</displayName>
+          <category>SLAVE</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/router.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <themes>
+        <theme>
+          <fileName>theme.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>redhat7,amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>druid_${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>druid-${stack_version}</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>amazon2015,redhat6,debian7,ubuntu12,ubuntu14,ubuntu16</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat7</osFamily>
+          <packages>
+            <package>
+              <name>mysql-community-release</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+            <package>
+              <name>mysql-community-server</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11,suse12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+              <skipUpgrade>true</skipUpgrade>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <requiredServices>
+        <service>ZOOKEEPER</service>
+      </requiredServices>
+      <configuration-dependencies>
+        <config-type>druid-common</config-type>
+        <config-type>druid-env</config-type>
+        <config-type>druid-coordinator</config-type>
+        <config-type>druid-overlord</config-type>
+        <config-type>druid-historical</config-type>
+        <config-type>druid-broker</config-type>
+        <config-type>druid-middlemanager</config-type>
+        <config-type>druid-log4j</config-type>
+        <config-type>druid-logrotate</config-type>
+        <config-type>druid-router</config-type>
+        <config-type>zoo.cfg</config-type>
+        <config-type>core-site</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>yarn-site</config-type>
+        <config-type>hdfs-site</config-type>
+      </configuration-dependencies>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
new file mode 100644
index 0000000..bd170cb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/broker.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidBroker(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="broker")
+
+
+if __name__ == "__main__":
+  DruidBroker().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
new file mode 100644
index 0000000..a86fa40
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/coordinator.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidCoordinator(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="coordinator")
+
+
+if __name__ == "__main__":
+  DruidCoordinator().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
new file mode 100644
index 0000000..0478d17
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid.py
@@ -0,0 +1,259 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.resources.properties_file import PropertiesFile
+from resource_management.core.resources.system import Directory, Execute, File
+from resource_management.core.source import InlineTemplate
+from resource_management.libraries.functions import format
+from resource_management.libraries.resources import XmlConfig
+from resource_management.libraries.functions.show_logs import show_logs
+from resource_management.core.logger import Logger
+
+
+def druid(upgrade_type=None, nodeType=None):
+  import params
+  ensure_base_directories()
+
+  # Environment Variables
+  File(format("{params.druid_conf_dir}/druid-env.sh"),
+       owner=params.druid_user,
+       content=InlineTemplate(params.druid_env_sh_template)
+       )
+
+  # common config
+  druid_common_config = mutable_config_dict(params.config['configurations']['druid-common'])
+  # User cannot override below configs
+  druid_common_config['druid.host'] = params.hostname
+  druid_common_config['druid.extensions.directory'] = params.druid_extensions_dir
+  druid_common_config['druid.extensions.hadoopDependenciesDir'] = params.druid_hadoop_dependencies_dir
+  druid_common_config['druid.selectors.indexing.serviceName'] = params.config['configurations']['druid-overlord'][
+    'druid.service']
+  druid_common_config['druid.selectors.coordinator.serviceName'] = \
+    params.config['configurations']['druid-coordinator']['druid.service']
+
+  # When using derby, remove the (empty) user and password entries, otherwise derby will fail.
+  if 'derby' == druid_common_config['druid.metadata.storage.type']:
+    del druid_common_config['druid.metadata.storage.connector.user']
+    del druid_common_config['druid.metadata.storage.connector.password']
+
+  druid_env_config = mutable_config_dict(params.config['configurations']['druid-env'])
+
+  PropertiesFile("common.runtime.properties",
+                 dir=params.druid_common_conf_dir,
+                 properties=druid_common_config,
+                 owner=params.druid_user,
+                 group=params.user_group,
+                 )
+  Logger.info("Created common.runtime.properties")
+
+  File(format("{params.druid_common_conf_dir}/druid-log4j.xml"),
+       mode=0644,
+       owner=params.druid_user,
+       group=params.user_group,
+       content=InlineTemplate(params.log4j_props)
+       )
+  Logger.info("Created log4j file")
+
+  File("/etc/logrotate.d/druid",
+       mode=0644,
+       owner='root',
+       group='root',
+       content=InlineTemplate(params.logrotate_props)
+       )
+
+  Logger.info("Created log rotate file")
+
+  # Write Hadoop Configs if configured
+  if 'core-site' in params.config['configurations']:
+    XmlConfig("core-site.xml",
+              conf_dir=params.druid_common_conf_dir,
+              configurations=params.config['configurations']['core-site'],
+              configuration_attributes=params.config['configuration_attributes']['core-site'],
+              owner=params.druid_user,
+              group=params.user_group
+              )
+
+  if 'mapred-site' in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=params.druid_common_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              configuration_attributes=params.config['configuration_attributes']['mapred-site'],
+              owner=params.druid_user,
+              group=params.user_group
+              )
+
+  if 'yarn-site' in params.config['configurations']:
+    XmlConfig("yarn-site.xml",
+              conf_dir=params.druid_common_conf_dir,
+              configurations=params.config['configurations']['yarn-site'],
+              configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+              owner=params.druid_user,
+              group=params.user_group
+              )
+
+  if 'hdfs-site' in params.config['configurations']:
+    XmlConfig("hdfs-site.xml",
+              conf_dir=params.druid_common_conf_dir,
+              configurations=params.config['configurations']['hdfs-site'],
+              configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+              owner=params.druid_user,
+              group=params.user_group
+              )
+
+  # node specific configs
+  for node_type in ['coordinator', 'overlord', 'historical', 'broker', 'middleManager', 'router']:
+    node_config_dir = format('{params.druid_conf_dir}/{node_type}')
+    node_type_lowercase = node_type.lower()
+
+    # Write runtime.properties file
+    node_config = mutable_config_dict(params.config['configurations'][format('druid-{node_type_lowercase}')])
+    PropertiesFile("runtime.properties",
+                   dir=node_config_dir,
+                   properties=node_config,
+                   owner=params.druid_user,
+                   group=params.user_group,
+                   )
+    Logger.info(format("Created druid-{node_type_lowercase} runtime.properties"))
+
+    # Write jvm configs
+    File(format('{node_config_dir}/jvm.config'),
+         owner=params.druid_user,
+         group=params.user_group,
+         content=InlineTemplate(
+           "-server \n-Xms{{node_heap_memory}}m \n-Xmx{{node_heap_memory}}m \n-XX:MaxDirectMemorySize={{node_direct_memory}}m \n-Dlog4j.configurationFile={{log4j_config_file}} \n-Dlog4j.debug \n{{node_jvm_opts}}",
+           node_heap_memory=druid_env_config[format('druid.{node_type_lowercase}.jvm.heap.memory')],
+           log4j_config_file=format("{params.druid_common_conf_dir}/druid-log4j.xml"),
+           node_direct_memory=druid_env_config[
+             format('druid.{node_type_lowercase}.jvm.direct.memory')],
+           node_jvm_opts=druid_env_config[format('druid.{node_type_lowercase}.jvm.opts')])
+         )
+    Logger.info(format("Created druid-{node_type_lowercase} jvm.config"))
+
+  # All druid nodes have dependency on hdfs_client
+  ensure_hadoop_directories()
+  # Pull all required dependencies
+  pulldeps()
+
+
+def mutable_config_dict(config):
+  rv = {}
+  for key, value in config.iteritems():
+    rv[key] = value
+  return rv
+
+
+def ensure_hadoop_directories():
+  import params
+  if 'hdfs-site' not in params.config['configurations']:
+    # HDFS not installed, nothing to do.
+    Logger.info("Skipping HDFS directory creation as HDFS not installed")
+    return
+
+  druid_common_config = params.config['configurations']['druid-common']
+  # middlemanager config, needed below for the Hadoop working path
+  druid_middlemanager_config = params.config['configurations']['druid-middlemanager']
+
+  # If user is using HDFS as deep storage create HDFS Directory for storing segments
+  deep_storage = druid_common_config["druid.storage.type"]
+  storage_dir = druid_common_config["druid.storage.storageDirectory"]
+
+  if deep_storage == 'hdfs':
+    # create the home dir for druid
+    params.HdfsResource(format("/user/{params.druid_user}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.druid_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+
+    # create the segment storage dir
+    create_hadoop_directory(storage_dir)
+
+  # Create HadoopIndexTask hadoopWorkingPath
+  hadoop_working_path = druid_middlemanager_config['druid.indexer.task.hadoopWorkingPath']
+  if hadoop_working_path is not None:
+    create_hadoop_directory(hadoop_working_path)
+
+  # If HDFS is used for storing logs, create Index Task log directory
+  indexer_logs_type = druid_common_config['druid.indexer.logs.type']
+  indexer_logs_directory = druid_common_config['druid.indexer.logs.directory']
+  if indexer_logs_type == 'hdfs' and indexer_logs_directory is not None:
+    create_hadoop_directory(indexer_logs_directory)
+
+
+def create_hadoop_directory(hadoop_dir):
+  import params
+  params.HdfsResource(hadoop_dir,
+                      type="directory",
+                      action="create_on_execute",
+                      owner=params.druid_user,
+                      mode=0755
+                      )
+  Logger.info(format("Created Hadoop Directory [{hadoop_dir}]"))
+
+
+def ensure_base_directories():
+  import params
+  Directory(
+    [params.druid_log_dir, params.druid_pid_dir, params.druid_common_conf_dir, params.druid_coordinator_conf_dir,
+     params.druid_broker_conf_dir, params.druid_middlemanager_conf_dir, params.druid_historical_conf_dir,
+     params.druid_overlord_conf_dir, params.druid_router_conf_dir, params.druid_segment_infoDir],
+    mode=0755,
+    cd_access='a',
+    owner=params.druid_user,
+    group=params.user_group,
+    create_parents=True,
+    recursive_ownership=True,
+  )
+
+
+def get_daemon_cmd(params=None, node_type=None, command=None):
+  return format('source {params.druid_conf_dir}/druid-env.sh ; {params.druid_home}/bin/node.sh {node_type} {command}')
+
+
+def getPid(params=None, nodeType=None):
+  return format('{params.druid_pid_dir}/{nodeType}.pid')
+
+
+def pulldeps():
+  import params
+  extensions_list = eval(params.druid_extensions)
+  extensions_string = " -c ".join(extensions_list)
+  if len(extensions_list) > 0:
+    try:
+      # Make sure druid user has permissions to write dependencies
+      Directory(
+        [params.druid_extensions_dir, params.druid_hadoop_dependencies_dir],
+        mode=0755,
+        cd_access='a',
+        owner=params.druid_user,
+        group=params.user_group,
+        create_parents=True,
+        recursive_ownership=True,
+      )
+      Execute(format(
+        "source {params.druid_conf_dir}/druid-env.sh ; java -classpath '{params.druid_home}/lib/*' -Ddruid.extensions.loadList=[] "
+        "-Ddruid.extensions.directory={params.druid_extensions_dir} -Ddruid.extensions.hadoopDependenciesDir={params.druid_hadoop_dependencies_dir} "
+        "io.druid.cli.Main tools pull-deps -c {extensions_string} --no-default-hadoop"),
+        user=params.druid_user
+      )
+      Logger.info(format("Pull Dependencies Complete"))
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
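
As a concrete sketch of what pulldeps() runs (the extension coordinate below is hypothetical, not part of this patch): if druid.extensions.pullList were ["io.druid.extensions:mysql-metadata-storage"], the Execute above would expand to approximately

    source <druid_conf_dir>/druid-env.sh ; java -classpath '<druid_home>/lib/*' \
      -Ddruid.extensions.loadList=[] \
      -Ddruid.extensions.directory=<druid_extensions_dir> \
      -Ddruid.extensions.hadoopDependenciesDir=<druid_hadoop_dependencies_dir> \
      io.druid.cli.Main tools pull-deps -c io.druid.extensions:mysql-metadata-storage --no-default-hadoop

where the angle-bracket placeholders stand for the corresponding params.py values.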

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
new file mode 100644
index 0000000..3b7f695
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/druid_node.py
@@ -0,0 +1,85 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import Script
+from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.check_process_status import check_process_status
+
+from resource_management.libraries.functions.show_logs import show_logs
+from druid import druid, get_daemon_cmd, getPid
+
+
+class DruidBase(Script):
+  def __init__(self, nodeType=None):
+    self.nodeType = nodeType
+
+  def get_component_name(self):
+    node_type_lower = self.nodeType.lower()
+    return format("druid-{node_type_lower}")
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    druid(upgrade_type=upgrade_type, nodeType=self.nodeType)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    return
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env, upgrade_type=upgrade_type)
+    daemon_cmd = get_daemon_cmd(params, self.nodeType, "start")
+    try:
+      Execute(daemon_cmd,
+              user=params.druid_user
+              )
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+
+    daemon_cmd = get_daemon_cmd(params, self.nodeType, "stop")
+    try:
+      Execute(daemon_cmd,
+              user=params.druid_user
+              )
+    except:
+      show_logs(params.druid_log_dir, params.druid_user)
+      raise
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = getPid(status_params, self.nodeType)
+    check_process_status(pid_file)
+
+  def get_log_folder(self):
+    import params
+    return params.druid_log_dir
+
+  def get_user(self):
+    import params
+    return params.druid_user

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
new file mode 100644
index 0000000..22390a6
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/historical.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidHistorical(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="historical")
+
+
+if __name__ == "__main__":
+  DruidHistorical().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
new file mode 100644
index 0000000..20df89c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/middlemanager.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidMiddleManager(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="middleManager")
+
+
+if __name__ == "__main__":
+  DruidMiddleManager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
new file mode 100644
index 0000000..e4d7fcc
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/overlord.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidOverlord(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="overlord")
+
+
+if __name__ == "__main__":
+  DruidOverlord().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
new file mode 100644
index 0000000..00add3f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/params.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions.default import default
+
+import status_params
+
+# a map of the Ambari role to the component name
+# for use with <stack-root>/current/<component>
+SERVER_ROLE_DIRECTORY_MAP = {
+  'DRUID_BROKER': 'druid-broker',
+  'DRUID_COORDINATOR': 'druid-coordinator',
+  'DRUID_HISTORICAL': 'druid-historical',
+  'DRUID_MIDDLEMANAGER': 'druid-middlemanager',
+  'DRUID_OVERLORD': 'druid-overlord',
+  'DRUID_ROUTER': 'druid-router'
+}
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+tmp_dir = Script.get_tmp_dir()
+
+# stack version
+stack_version = default("/commandParams/version", None)
+
+# fall back to DRUID_COORDINATOR when the role is not in the map (e.g. during service checks)
+component_directory = Script.get_component_from_role(SERVER_ROLE_DIRECTORY_MAP, "DRUID_COORDINATOR")
+
+hostname = config['hostname']
+
+# default druid parameters
+druid_home = format("{stack_root}/current/{component_directory}")
+druid_conf_dir = format("{stack_root}/current/{component_directory}/conf")
+
+druid_common_conf_dir = druid_conf_dir + "/_common"
+druid_coordinator_conf_dir = druid_conf_dir + "/coordinator"
+druid_overlord_conf_dir = druid_conf_dir + "/overlord"
+druid_broker_conf_dir = druid_conf_dir + "/broker"
+druid_historical_conf_dir = druid_conf_dir + "/historical"
+druid_middlemanager_conf_dir = druid_conf_dir + "/middleManager"
+druid_router_conf_dir = druid_conf_dir + "/router"
+druid_extensions_dir = druid_home + "/extensions"
+druid_hadoop_dependencies_dir = druid_home + "/hadoop-dependencies"
+druid_segment_infoDir = config['configurations']['druid-historical']['druid.segmentCache.infoDir']
+druid_user = config['configurations']['druid-env']['druid_user']
+druid_log_dir = config['configurations']['druid-env']['druid_log_dir']
+druid_classpath = config['configurations']['druid-env']['druid_classpath']
+druid_extensions = config['configurations']['druid-common']['druid.extensions.pullList']
+
+# status params
+druid_pid_dir = status_params.druid_pid_dir
+user_group = config['configurations']['cluster-env']['user_group']
+java8_home = config['hostLevelParams']['java_home']
+druid_env_sh_template = config['configurations']['druid-env']['content']
+
+# log4j params
+log4j_props = config['configurations']['druid-log4j']['content']
+druid_log_level = config['configurations']['druid-log4j']['druid_log_level']
+metamx_log_level = config['configurations']['druid-log4j']['metamx_log_level']
+root_log_level = config['configurations']['druid-log4j']['root_log_level']
+logrotate_props = config['configurations']['druid-logrotate']['content']
+
+# Metadata storage
+metadata_storage_user = config['configurations']['druid-common']['druid.metadata.storage.connector.user']
+metadata_storage_password = config['configurations']['druid-common']['druid.metadata.storage.connector.password']
+metadata_storage_db_name = config['configurations']['druid-common']['database_name']
+
+# HDFS
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST",
+                                                                                                             hostname)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+dfs_type = default("/commandParams/dfs_type", "")
+
+# Kerberos
+druid_principal_name = default('/configurations/druid-common/druid.hadoop.security.kerberos.principal',
+                               'missing_principal')
+druid_user_keytab = default('/configurations/druid-common/druid.hadoop.security.kerberos.keytab', 'missing_keytab')
+
+import functools
+
+# create a partial function that pre-binds the arguments common to every HdfsResource call;
+# service code then creates HDFS directories by calling params.HdfsResource(...)
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled=security_enabled,
+  keytab=hdfs_user_keytab,
+  kinit_path_local=kinit_path_local,
+  hadoop_bin_dir=hadoop_bin_dir,
+  hadoop_conf_dir=hadoop_conf_dir,
+  principal_name=hdfs_principal_name,
+  hdfs_site=hdfs_site,
+  default_fs=default_fs,
+  immutable_paths=get_not_managed_resources(),
+  dfs_type=dfs_type
+)
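
Note: the functools.partial above pre-binds everything that is common to HDFS operations, so call sites only pass what is specific to them. A minimal usage sketch from a service script, following the usual Ambari pattern (the path, owner and mode below are illustrative, not taken from this patch):

import params

# queue a directory creation using the defaults pre-bound in params.py
params.HdfsResource("/user/druid",
                    type="directory",
                    action="create_on_execute",
                    owner=params.druid_user,
                    group=params.user_group,
                    mode=0755)
# nothing is applied to HDFS until the queued operations are flushed
params.HdfsResource(None, action="execute")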

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
new file mode 100644
index 0000000..1731a2a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/router.py
@@ -0,0 +1,28 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from druid_node import DruidBase
+
+
+class DruidRouter(DruidBase):
+  def __init__(self):
+    DruidBase.__init__(self, nodeType="router")
+
+
+if __name__ == "__main__":
+  DruidRouter().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/685e926d/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
new file mode 100644
index 0000000..139b727
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/DRUID/0.9.2/package/scripts/service_check.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    self.checkComponent(params, "druid_coordinator", "druid-coordinator")
+    self.checkComponent(params, "druid_overlord", "druid-overlord")
+
+  def checkComponent(self, params, component_name, config_name):
+    component_port = params.config['configurations'][config_name]['druid.port']
+    for component_host in params.config['clusterHostInfo'][component_name + '_hosts']:
+      Execute(format(
+        "curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {component_host}:{component_port}/status | grep 200"),
+        tries=10,
+        try_sleep=3,
+        logoutput=True)
+
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
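
Note: for a sense of what the check actually runs, with a hypothetical overlord host overlord-host.example.com and druid.port set to 8090, the Execute above renders to roughly

  curl -s -o /dev/null -w'%{http_code}' --negotiate -u: -k overlord-host.example.com:8090/status | grep 200

retried up to 10 times with a 3-second pause between attempts, so the service check passes only if every coordinator and overlord host answers its /status endpoint with HTTP 200.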