Posted to commits@bigtop.apache.org by gu...@apache.org on 2022/08/10 05:01:06 UTC

[bigtop] branch master updated: BIGTOP-3740: Add Zeppelin support for Bigtop 3.1.0 Mpack (#954)

This is an automated email from the ASF dual-hosted git repository.

guyuqi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/bigtop.git


The following commit(s) were added to refs/heads/master by this push:
     new 0f89bf0f BIGTOP-3740: Add Zeppelin support for Bigtop 3.1.0 Mpack (#954)
0f89bf0f is described below

commit 0f89bf0fd2fea22c8ae9fd80f019df8229a84563
Author: 吴治国 <ch...@startdt.com>
AuthorDate: Wed Aug 10 13:01:01 2022 +0800

    BIGTOP-3740: Add Zeppelin support for Bigtop 3.1.0 Mpack (#954)
    
    * BIGTOP-3740: Add Zeppelin support for Bigtop 3.1.0 Mpack
---
 .../stacks/BGTP/1.0/services/ZEPPELIN/alerts.json  |  19 +
 .../ZEPPELIN/configuration/zeppelin-env.xml        | 217 +++++++
 .../configuration/zeppelin-log4j-properties.xml    |  37 ++
 .../ZEPPELIN/configuration/zeppelin-shiro-ini.xml  | 106 ++++
 .../ZEPPELIN/configuration/zeppelin-site.xml       | 232 +++++++
 .../BGTP/1.0/services/ZEPPELIN/kerberos.json       |  53 ++
 .../stacks/BGTP/1.0/services/ZEPPELIN/metainfo.xml | 112 ++++
 .../package/scripts/alerts/alert_check_zeppelin.py | 108 ++++
 .../package/scripts/interpreter_json_template.py   | 497 +++++++++++++++
 .../services/ZEPPELIN/package/scripts/master.py    | 677 +++++++++++++++++++++
 .../services/ZEPPELIN/package/scripts/params.py    | 318 ++++++++++
 .../ZEPPELIN/package/scripts/service_check.py      |  40 ++
 .../ZEPPELIN/package/scripts/status_params.py      |  29 +
 .../templates/input.config-zeppelin.json.j2        |  48 ++
 .../services/ZEPPELIN/quicklinks/quicklinks.json   |  36 ++
 .../1.0/services/ZEPPELIN/role_command_order.json  |   8 +
 .../BGTP/1.0/services/ZEPPELIN/service_advisor.py  | 293 +++++++++
 .../1.0/services/ZEPPELIN/themes/directories.json  |  89 +++
 .../dev-support/docker/centos7/build-containers.sh |   2 +-
 19 files changed, 2920 insertions(+), 1 deletion(-)

diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/alerts.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/alerts.json
new file mode 100644
index 00000000..dcccc8af
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/alerts.json
@@ -0,0 +1,19 @@
+{
+    "ZEPPELIN": {
+      "service": [],
+      "ZEPPELIN_MASTER": [
+        {
+          "name": "zeppelin_server_status",
+          "label": "Zeppelin Server Status",
+          "description": "This host-level alert is triggered if the Zeppelin server cannot be determined to be up and responding to client requests.",
+          "interval": 1,
+          "scope": "ANY",
+          "source": {
+            "type": "SCRIPT",
+            "path": "BGTP/1.0/services/ZEPPELIN/package/scripts/alerts/alert_check_zeppelin.py"
+          }
+        }
+      ]
+    }
+  }
+  
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-env.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-env.xml
new file mode 100644
index 00000000..26d85135
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-env.xml
@@ -0,0 +1,217 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>zeppelin_pid_dir</name>
+    <display-name>Zeppelin pid dir</display-name>
+    <value>/var/run/zeppelin</value>
+    <description>Dir containing process ID file</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_user</name>
+    <display-name>Zeppelin User</display-name>
+    <value>zeppelin</value>
+    <property-type>USER</property-type>
+    <description>User the Zeppelin daemon runs as</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+      <user-groups>
+        <property>
+          <type>zeppelin-env</type>
+          <name>zeppelin_group</name>
+        </property>
+        <property>
+          <type>cluster-env</type>
+          <name>user_group</name>
+        </property>
+      </user-groups>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_group</name>
+    <display-name>Zeppelin Group</display-name>
+    <value>zeppelin</value>
+    <property-type>GROUP</property-type>
+    <description>Group the Zeppelin daemon runs as</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_log_dir</name>
+    <display-name>Zeppelin Log Dir</display-name>
+    <value>/var/log/zeppelin</value>
+    <description>Zeppelin Log dir</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_war_tempdir</name>
+    <display-name>Zeppelin War Temp Dir</display-name>
+    <value>/var/run/zeppelin/webapps</value>
+    <description>Zeppelin war tempdir</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_notebook_dir</name>
+    <display-name>Zeppelin Notebook Dir</display-name>
+    <value>/user/zeppelin/notebook</value>
+    <description>Zeppelin notebook directory path (defaults to an HDFS path)</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>spark_home</name>
+    <display-name>Spark Home</display-name>
+    <value>/usr/lib/spark</value>
+    <description>Spark Home</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_home</name>
+    <display-name>HBase Home</display-name>
+    <value>/usr/lib/hbase</value>
+    <description>HBase Home</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hbase_conf_dir</name>
+    <display-name>HBase Conf Dir</display-name>
+    <value>/etc/hbase/conf</value>
+    <description>HBase Conf Dir</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin_env_content</name>
+    <description>This is the jinja template for zeppelin-env.sh file</description>
+    <value>
+      # export JAVA_HOME=
+      export JAVA_HOME={{java64_home}}
+      # export MASTER=                              # Spark master url. eg. spark://master_addr:7077. Leave empty if you want to use local mode.
+      export MASTER=yarn-client
+
+      # export ZEPPELIN_JAVA_OPTS                   # Additional jvm options. for example, export ZEPPELIN_JAVA_OPTS="-Dspark.executor.memory=8g -Dspark.cores.max=16"
+      # export ZEPPELIN_MEM                         # Zeppelin jvm mem options Default -Xms1024m -Xmx1024m -XX:MaxPermSize=512m
+      # export ZEPPELIN_INTP_MEM                    # zeppelin interpreter process jvm mem options. Default -Xms1024m -Xmx1024m -XX:MaxPermSize=512m
+      # export ZEPPELIN_INTP_JAVA_OPTS              # zeppelin interpreter process jvm options.
+      # export ZEPPELIN_SSL_PORT                    # ssl port (used when ssl environment variable is set to true)
+
+      # export ZEPPELIN_LOG_DIR                     # Where log files are stored.  PWD by default.
+      export ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}
+      # export ZEPPELIN_PID_DIR                     # The pid files are stored. ${ZEPPELIN_HOME}/run by default.
+      export ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}
+      # export ZEPPELIN_WAR_TEMPDIR                 # The location of jetty temporary directory.
+      export ZEPPELIN_WAR_TEMPDIR={{zeppelin_war_tempdir}}
+      # export ZEPPELIN_NOTEBOOK_DIR                # Where notebook saved
+      export ZEPPELIN_NOTEBOOK_DIR={{zeppelin_notebook_dir}}
+      # export ZEPPELIN_NOTEBOOK_HOMESCREEN         # Id of notebook to be displayed in homescreen. ex) 2A94M5J1Z
+      # export ZEPPELIN_NOTEBOOK_HOMESCREEN_HIDE    # hide homescreen notebook from list when this value set to "true". default "false"
+      # export ZEPPELIN_NOTEBOOK_S3_BUCKET          # Bucket where notebook saved
+      # export ZEPPELIN_NOTEBOOK_S3_ENDPOINT        # Endpoint of the bucket
+      # export ZEPPELIN_NOTEBOOK_S3_USER            # User in bucket where notebook saved. For example bucket/user/notebook/2A94M5J1Z/note.json
+      # export ZEPPELIN_IDENT_STRING                # A string representing this instance of zeppelin. $USER by default.
+      # export ZEPPELIN_NICENESS                    # The scheduling priority for daemons. Defaults to 0.
+      # export ZEPPELIN_INTERPRETER_LOCALREPO       # Local repository for interpreter's additional dependency loading
+      # export ZEPPELIN_NOTEBOOK_STORAGE            # Refers to pluggable notebook storage class, can have two classes simultaneously with a sync between them (e.g. local and remote).
+      # export ZEPPELIN_NOTEBOOK_ONE_WAY_SYNC       # If there are multiple notebook storages, should we treat the first one as the only source of truth?
+      # export ZEPPELIN_NOTEBOOK_PUBLIC             # Make notebook public by default when created, private otherwise
+      export ZEPPELIN_INTP_CLASSPATH_OVERRIDES="{{external_dependency_conf}}"
+      #### Spark interpreter configuration ####
+
+      ## Kerberos ticket refresh setting
+      ##
+      export KINIT_FAIL_THRESHOLD=5
+      export KERBEROS_REFRESH_INTERVAL=1d
+
+      ## Use provided spark installation ##
+      ## defining SPARK_HOME makes Zeppelin run spark interpreter process using spark-submit
+      ##
+      # export SPARK_HOME                           # (required) When it is defined, load it instead of Zeppelin embedded Spark libraries
+      export SPARK_HOME={{spark_home}}
+      # export SPARK_SUBMIT_OPTIONS                 # (optional) extra options to pass to spark submit. eg) "--driver-memory 512M --executor-memory 1G".
+      # export SPARK_APP_NAME                       # (optional) The name of spark application.
+
+      ## Use embedded spark binaries ##
+      ## Without SPARK_HOME defined, Zeppelin is still able to run the Spark interpreter process using the embedded Spark binaries;
+      ## however, this is not encouraged when you can define SPARK_HOME
+      ##
+      # Options read in YARN client mode
+      # export HADOOP_CONF_DIR                      # yarn-site.xml is located in configuration directory in HADOOP_CONF_DIR.
+      export HADOOP_CONF_DIR=/etc/hadoop/conf
+      # Pyspark (supported with Spark 1.2.1 and above)
+      # To configure pyspark, you need to set spark distribution's path to 'spark.home' property in Interpreter setting screen in Zeppelin GUI
+      # export PYSPARK_PYTHON                       # path to the python command. must be the same path on the driver(Zeppelin) and all workers.
+      # export PYTHONPATH
+
+      ## Spark interpreter options ##
+      ##
+      # export ZEPPELIN_SPARK_USEHIVECONTEXT        # Use HiveContext instead of SQLContext if set true. true by default.
+      # export ZEPPELIN_SPARK_CONCURRENTSQL         # Execute multiple SQL concurrently if set true. false by default.
+      # export ZEPPELIN_SPARK_IMPORTIMPLICIT        # Import implicits, UDF collection, and sql if set true. true by default.
+      # export ZEPPELIN_SPARK_MAXRESULT             # Max number of Spark SQL result to display. 1000 by default.
+      # export ZEPPELIN_WEBSOCKET_MAX_TEXT_MESSAGE_SIZE       # Size in characters of the maximum text message to be received by websocket. Defaults to 1024000
+
+
+      #### HBase interpreter configuration ####
+
+      ## To connect to HBase running on a cluster, either HBASE_HOME or HBASE_CONF_DIR must be set
+
+      # export HBASE_HOME=                          # (required) Root directory under which the HBase scripts and configuration live
+      # export HBASE_CONF_DIR=                      # (optional) Alternatively, configuration directory can be set to point to the directory that has hbase-site.xml
+      export HBASE_HOME={{hbase_home}}
+      export HBASE_CONF_DIR={{hbase_conf_dir}}
+
+      # export ZEPPELIN_IMPERSONATE_CMD             # Optional, when the user wants to run the interpreter as the end web user. eg) 'sudo -H -u ${ZEPPELIN_IMPERSONATE_USER} bash -c '
+    </value>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>zeppelin.executor.mem</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.executor.instances</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.spark.jar.dir</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.server.kerberos.principal</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>zeppelin.server.kerberos.keytab</name>
+    <deleted>true</deleted>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
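
Note on the template above: the {{...}} placeholders in zeppelin_env_content are substituted by
Ambari before zeppelin-env.sh is written to disk. A minimal sketch of that rendering step, assuming
the jinja2 package and illustrative parameter values (Ambari itself renders the template through
its InlineTemplate resource with the stack's params module in scope):

    # Minimal sketch: render a fragment of the zeppelin-env.sh template.
    # The parameter values below are hypothetical.
    from jinja2 import Template

    fragment = (
        "export JAVA_HOME={{java64_home}}\n"
        "export ZEPPELIN_LOG_DIR={{zeppelin_log_dir}}\n"
        "export ZEPPELIN_PID_DIR={{zeppelin_pid_dir}}\n"
        "export SPARK_HOME={{spark_home}}\n"
    )

    params = {
        "java64_home": "/usr/lib/jvm/java",       # hypothetical path
        "zeppelin_log_dir": "/var/log/zeppelin",
        "zeppelin_pid_dir": "/var/run/zeppelin",
        "spark_home": "/usr/lib/spark",
    }

    print(Template(fragment).render(**params))
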
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-log4j-properties.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-log4j-properties.xml
new file mode 100644
index 00000000..a8a24efb
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-log4j-properties.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+<property>
+    <name>log4j_properties_content</name>
+    <description>This is the content for log4j.properties file</description>
+    <value>
+log4j.rootLogger = INFO, dailyfile
+log4j.appender.stdout = org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout = org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%5p [%d{ISO8601}] ({%t} %F[%M]:%L) - %m%n
+log4j.appender.dailyfile.DatePattern=.yyyy-MM-dd
+log4j.appender.dailyfile.Threshold = INFO
+log4j.appender.dailyfile = org.apache.log4j.DailyRollingFileAppender
+log4j.appender.dailyfile.File = ${zeppelin.log.file}
+log4j.appender.dailyfile.layout = org.apache.log4j.PatternLayout
+log4j.appender.dailyfile.layout.ConversionPattern=%5p [%d{ISO8601}] ({%t} %F[%M]:%L) - %m%n
+    </value>
+    <on-ambari-upgrade add="false"/>
+</property>
+</configuration>
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-shiro-ini.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-shiro-ini.xml
new file mode 100644
index 00000000..18277edf
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-shiro-ini.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+  <property>
+    <name>shiro_ini_content</name>
+    <description>This is the jinja template for shiro.ini file</description>
+      <value>
+[users]
+# List of users with their password allowed to access Zeppelin.
+# To use a different strategy (LDAP / Database / ...) check the shiro doc at http://shiro.apache.org/configuration.html#Configuration-INISections
+admin = $shiro1$SHA-256$500000$p6Be9+t2hdUXJQj2D0b1fg==$bea5JIMqcVF3J6eNZGWQ/3eeDByn5iEZDuGsEip06+M=, admin
+user1 = $shiro1$SHA-256$500000$G2ymy/qmuZnGY6or4v2KfA==$v9fabqWgCNCgechtOUqAQenGDs0OSLP28q2wolPT4wU=, role1, role2
+user2 = $shiro1$SHA-256$500000$aHBgiuwSgAcP3Xt5mEzeFw==$KosBnN2BNKA9/KHBL0hnU/woJFl+xzJFj12NQ0fnjCU=, role3
+user3 = $shiro1$SHA-256$500000$nf0GzH10GbYVoxa7DOlOSw==$ov/IA5W8mRWPwvAoBjNYxg3udJK0EmrVMvFCwcr9eAs=, role2
+
+# Sample LDAP configuration for user authentication, currently tested for a single realm
+[main]
+### A sample for configuring Active Directory Realm
+#activeDirectoryRealm = org.apache.zeppelin.realm.ActiveDirectoryGroupRealm
+#activeDirectoryRealm.systemUsername = userNameA
+
+#use either systemPassword or hadoopSecurityCredentialPath, more details in http://zeppelin.apache.org/docs/latest/security/shiroauthentication.html
+#activeDirectoryRealm.systemPassword = passwordA
+#activeDirectoryRealm.hadoopSecurityCredentialPath = jceks://file/user/zeppelin/zeppelin.jceks
+#activeDirectoryRealm.searchBase = CN=Users,DC=SOME_GROUP,DC=COMPANY,DC=COM
+#activeDirectoryRealm.url = ldap://ldap.test.com:389
+#activeDirectoryRealm.groupRolesMap = "CN=admin,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"admin","CN=finance,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"finance","CN=hr,OU=groups,DC=SOME_GROUP,DC=COMPANY,DC=COM":"hr"
+#activeDirectoryRealm.authorizationCachingEnabled = false
+
+### A sample for configuring LDAP Directory Realm
+#ldapRealm = org.apache.zeppelin.realm.LdapGroupRealm
+## search base for ldap groups (only relevant for LdapGroupRealm):
+#ldapRealm.contextFactory.environment[ldap.searchBase] = dc=COMPANY,dc=COM
+#ldapRealm.contextFactory.url = ldap://ldap.test.com:389
+#ldapRealm.userDnTemplate = uid={0},ou=Users,dc=COMPANY,dc=COM
+#ldapRealm.contextFactory.authenticationMechanism = SIMPLE
+
+### A sample PAM configuration
+#pamRealm=org.apache.zeppelin.realm.PamRealm
+#pamRealm.service=sshd
+
+## To be commented out when not using the [users] block / plaintext passwords
+passwordMatcher = org.apache.shiro.authc.credential.PasswordMatcher
+iniRealm.credentialsMatcher = $passwordMatcher
+
+sessionManager = org.apache.shiro.web.session.mgt.DefaultWebSessionManager
+### If caching of users is required, uncomment the lines below
+cacheManager = org.apache.shiro.cache.MemoryConstrainedCacheManager
+securityManager.cacheManager = $cacheManager
+
+cookie = org.apache.shiro.web.servlet.SimpleCookie
+cookie.name = JSESSIONID
+#Uncomment the line below when running Zeppelin-Server in HTTPS mode
+#cookie.secure = true
+cookie.httpOnly = true
+sessionManager.sessionIdCookie = $cookie
+
+securityManager.sessionManager = $sessionManager
+# 86,400,000 milliseconds = 24 hours
+securityManager.sessionManager.globalSessionTimeout = 86400000
+shiro.loginUrl = /api/login
+
+[roles]
+role1 = *
+role2 = *
+role3 = *
+admin = *
+
+[urls]
+# This section is used for url-based security.
+# You can secure interpreter, configuration, and credential information by URL. Comment or uncomment the URLs below depending on what you want to hide.
+# anon means the access is anonymous.
+# authc means form-based authentication is required.
+# To enforce security, comment the line below and uncomment the next one
+/api/version = anon
+#/api/interpreter/** = authc, roles[admin]
+#/api/configurations/** = authc, roles[admin]
+#/api/credential/** = authc, roles[admin]
+#/** = anon
+/** = authc
+      </value>
+    <depends-on>
+      <property>
+        <type>zeppelin-site</type>
+        <name>zeppelin.ssl</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
\ No newline at end of file
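
Note on the [users] block above: the entries store iterated, salted SHA-256 digests in Shiro's
$shiro1$ crypt format ($shiro1$ALGORITHM$ITERATIONS$BASE64_SALT$BASE64_DIGEST) rather than
plaintext. A minimal sketch of checking a password against such a hash, assuming Shiro's SimpleHash
behavior of hashing salt+password once and then re-hashing the digest for the remaining iterations;
illustrative only, not a replacement for Shiro's PasswordMatcher:

    # Minimal sketch: verify a password against a $shiro1$ SHA-256 hash.
    import base64
    import hashlib

    def verify_shiro1(password, stored_hash):
        _, scheme, algo, iterations, salt_b64, digest_b64 = stored_hash.split("$")
        assert scheme == "shiro1" and algo == "SHA-256"
        salt = base64.b64decode(salt_b64)
        # First round hashes salt + password, remaining rounds re-hash the digest.
        digest = hashlib.sha256(salt + password.encode("utf-8")).digest()
        for _ in range(int(iterations) - 1):
            digest = hashlib.sha256(digest).digest()
        return base64.b64encode(digest).decode("ascii") == digest_b64
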
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-site.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-site.xml
new file mode 100644
index 00000000..782345a6
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/configuration/zeppelin-site.xml
@@ -0,0 +1,232 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <!-- contents of actual zeppelin-site.xml -->
+    <property>
+        <name>zeppelin.server.addr</name>
+        <value>0.0.0.0</value>
+        <description>Server address</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.server.port</name>
+        <value>9995</value>
+        <description>Server port. The subsequent port (e.g. 9996) should also be open, as it will
+            be used by the web socket
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.server.ssl.port</name>
+        <value>9995</value>
+        <description>Server SSL port (used when the ssl property is set to true)
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.notebook.dir</name>
+        <value>notebook</value>
+        <description>Path under which notebooks are persisted</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.notebook.homescreen</name>
+        <value> </value>
+        <description>ID of the notebook to be displayed on the home screen, e.g. 2A94M5J1Z. An empty
+            value displays the default home screen
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.notebook.homescreen.hide</name>
+        <value>false</value>
+        <description>Hide the home-screen notebook from the list when this value is set to true</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.notebook.s3.user</name>
+        <value>user</value>
+        <description>User name for the S3 folder structure. If S3 is used to store the notebooks,
+            the folder structure bucketname/username/notebook/ must be used
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.notebook.s3.bucket</name>
+        <value>zeppelin</value>
+        <description>Bucket name for notebook storage. If S3 is used to store the notebooks,
+            the folder structure bucketname/username/notebook/ must be used
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.notebook.storage</name>
+        <value>org.apache.zeppelin.notebook.repo.FileSystemNotebookRepo</value>
+        <description>Notebook persistence layer implementation. If S3 is used, set this to
+            org.apache.zeppelin.notebook.repo.S3NotebookRepo instead, and use the folder
+            structure bucketname/username/notebook/
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.config.storage.class</name>
+        <value>org.apache.zeppelin.storage.FileSystemConfigStorage</value>
+        <description></description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.config.fs.dir</name>
+        <value>conf</value>
+        <description>Location where interpreter.json should be installed</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.interpreter.dir</name>
+        <value>interpreter</value>
+        <description>Interpreter implementation base directory</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.interpreters</name>
+        <value>org.apache.zeppelin.spark.SparkInterpreter,org.apache.zeppelin.spark.PySparkInterpreter,org.apache.zeppelin.spark.SparkSqlInterpreter,org.apache.zeppelin.spark.DepInterpreter,org.apache.zeppelin.markdown.Markdown,org.apache.zeppelin.angular.AngularInterpreter,org.apache.zeppelin.shell.ShellInterpreter,org.apache.zeppelin.jdbc.JDBCInterpreter,org.apache.zeppelin.phoenix.PhoenixInterpreter,org.apache.zeppelin.livy.LivySparkInterpreter,org.apache.zeppelin.livy.LivyPySparkInte [...]
+        <description>Comma-separated interpreter configurations. The first interpreter becomes the
+            default
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.interpreter.group.order</name>
+        <value>spark,angular,jdbc,livy,md,sh</value>
+        <description>Comma-separated interpreter group order. The first group becomes the default
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.interpreter.connect.timeout</name>
+        <value>30000</value>
+        <description>Interpreter process connect timeout in msec.</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl</name>
+        <value>false</value>
+        <description>Should SSL be used by the servers?</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl.client.auth</name>
+        <value>false</value>
+        <description>Should client authentication be used for SSL connections?</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl.keystore.path</name>
+        <value>conf/keystore</value>
+        <description>Path to keystore relative to Zeppelin home</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl.keystore.type</name>
+        <value>JKS</value>
+        <description>The format of the given keystore (e.g. JKS or PKCS12)</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl.keystore.password</name>
+        <value>change me</value>
+        <property-type>PASSWORD</property-type>
+        <description>Keystore password. Can be obfuscated by the Jetty Password tool</description>
+        <value-attributes>
+            <type>password</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl.key.manager.password</name>
+        <value>change me</value>
+        <property-type>PASSWORD</property-type>
+        <description>Key Manager password. Defaults to keystore password. Can be obfuscated.
+        </description>
+        <value-attributes>
+            <type>password</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl.truststore.path</name>
+        <value>conf/truststore</value>
+        <description>Path to truststore relative to Zeppelin home. Defaults to the keystore path
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl.truststore.type</name>
+        <value>JKS</value>
+        <description>The format of the given truststore (e.g. JKS or PKCS12). Defaults to the same
+            type as the keystore type
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.ssl.truststore.password</name>
+        <value>change me</value>
+        <property-type>PASSWORD</property-type>
+        <description>Truststore password. Can be obfuscated by the Jetty Password tool. Defaults to
+            the keystore password
+        </description>
+        <value-attributes>
+            <type>password</type>
+        </value-attributes>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.server.allowed.origins</name>
+        <value>*</value>
+        <description>Allowed sources for REST and WebSocket requests (e.g.
+            http://onehost:8080,http://otherhost.com). If you leave this as *, you are vulnerable to
+            https://issues.apache.org/jira/browse/ZEPPELIN-173
+        </description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.anonymous.allowed</name>
+        <value>false</value>
+        <description>Whether the anonymous user is allowed to access Zeppelin</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>zeppelin.notebook.public</name>
+        <value>false</value>
+        <description>Make notebook public by default when created, private otherwise</description>
+        <on-ambari-upgrade add="true"/>
+    </property>
+    <property>
+        <name>zeppelin.websocket.max.text.message.size</name>
+        <value>1024000</value>
+        <description>Size in characters of the maximum text message to be received by websocket. Defaults to 1024000</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+    <property>
+        <name>zeppelin.interpreter.config.upgrade</name>
+        <value>true</value>
+        <description>If set to true, the default interpreter parameters are reset on every restart of the Zeppelin server</description>
+        <on-ambari-upgrade add="false"/>
+    </property>
+</configuration>
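
Note on the ports above: whether the server answers on zeppelin.server.port or
zeppelin.server.ssl.port depends on zeppelin.ssl, and /api/version is a convenient liveness probe
(the same endpoint the alert script below checks with curl). A minimal sketch with the Python
standard library, assuming the default host and port values from this file:

    # Minimal sketch: probe Zeppelin's /api/version endpoint.
    import ssl
    import urllib.request

    def zeppelin_is_up(host="localhost", port=9995, use_ssl=False):
        scheme = "https" if use_ssl else "http"
        url = "%s://%s:%d/api/version" % (scheme, host, port)
        # Skip certificate verification, like the alert's curl -k.
        ctx = ssl._create_unverified_context() if use_ssl else None
        try:
            with urllib.request.urlopen(url, timeout=10, context=ctx) as resp:
                return resp.status == 200
        except OSError:
            return False
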
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/kerberos.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/kerberos.json
new file mode 100644
index 00000000..9194eb06
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/kerberos.json
@@ -0,0 +1,53 @@
+{
+    "services": [
+      {
+        "name": "ZEPPELIN",
+        "identities": [
+          {
+            "name": "zeppelin_smokeuser",
+            "reference": "/smokeuser"
+          },
+          {
+            "name": "zeppelin_user",
+            "principal": {
+              "value": "${zeppelin-env/zeppelin_user}${principal_suffix}@${realm}",
+              "type" : "user",
+              "configuration": "zeppelin-site/zeppelin.server.kerberos.principal",
+              "local_username" : "${zeppelin-env/zeppelin_user}"
+            },
+            "keytab": {
+              "file": "${keytab_dir}/zeppelin.server.kerberos.keytab",
+              "owner": {
+                "name": "${zeppelin-env/zeppelin_user}",
+                "access": "r"
+              },
+              "group": {
+                "name": "${cluster-env/user_group}",
+                "access": ""
+              },
+              "configuration": "zeppelin-site/zeppelin.server.kerberos.keytab"
+            }
+          }
+        ],
+        "components": [
+          {
+            "name": "ZEPPELIN_MASTER"
+          }
+        ],
+        "configurations": [
+          {
+            "zeppelin-env": {
+              "zeppelin.kerberos.enabled": "true"
+            }
+          },
+          {
+            "core-site": {
+              "hadoop.proxyuser.${zeppelin-env/zeppelin_user}.groups": "*",
+              "hadoop.proxyuser.${zeppelin-env/zeppelin_user}.hosts": "*"
+            }
+          }
+        ]
+      }
+    ]
+  }
+  
\ No newline at end of file
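
Note on the descriptor above: the ${config-type/property} and ${variable} references are resolved
by Ambari against the cluster's effective configuration when Kerberos is enabled. A minimal sketch
of that substitution with hypothetical values (Ambari's real resolver also derives ${realm},
${keytab_dir}, and ${principal_suffix} from kerberos-env):

    # Minimal sketch: expand ${...} references the way the Kerberos
    # descriptor is resolved. All values below are hypothetical.
    import re

    values = {
        "zeppelin-env/zeppelin_user": "zeppelin",
        "cluster-env/user_group": "hadoop",
        "principal_suffix": "-mycluster",
        "realm": "EXAMPLE.COM",
    }

    def resolve(template):
        return re.sub(r"\$\{([^}]+)\}", lambda m: values[m.group(1)], template)

    print(resolve("${zeppelin-env/zeppelin_user}${principal_suffix}@${realm}"))
    # -> zeppelin-mycluster@EXAMPLE.COM
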
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/metainfo.xml b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/metainfo.xml
new file mode 100644
index 00000000..c4d5686c
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/metainfo.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZEPPELIN</name>
+      <displayName>Zeppelin Notebook</displayName>
+      <comment>A web-based notebook that enables interactive data analytics. It enables you to
+        make beautiful data-driven, interactive and collaborative documents with SQL, Scala
+        and more.
+      </comment>
+      <version>Bigtop+3.1</version>
+      <components>
+        <component>
+          <name>ZEPPELIN_MASTER</name>
+          <displayName>Zeppelin Notebook</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>10000</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>SPARK2/SPARK2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <logs>
+            <log>
+              <logId>zeppelin</logId>
+              <primary>true</primary>
+            </log>
+          </logs>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>zeppelin</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+        <service>YARN</service>
+        <service>SPARK2</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>zeppelin-site</config-type>
+        <config-type>zeppelin-env</config-type>
+        <config-type>zeppelin-shiro-ini</config-type>
+        <config-type>zeppelin-log4j-properties</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+
+      <quickLinksConfigurations>
+        <quickLinksConfiguration>
+          <fileName>quicklinks.json</fileName>
+          <default>true</default>
+        </quickLinksConfiguration>
+      </quickLinksConfigurations>
+
+      <themes>
+        <theme>
+          <fileName>directories.json</fileName>
+          <default>true</default>
+        </theme>
+      </themes>
+    </service>
+  </services>
+</metainfo>
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/alerts/alert_check_zeppelin.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/alerts/alert_check_zeppelin.py
new file mode 100644
index 00000000..caa80e54
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/alerts/alert_check_zeppelin.py
@@ -0,0 +1,108 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import socket
+
+from resource_management.core.exceptions import ComponentIsNotRunning
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.core.resources import Execute
+from resource_management.libraries.functions import format
+
+ZEPPELIN_PORT_KEY = '{{zeppelin-site/zeppelin.server.port}}'
+ZEPPELIN_PORT_SSL_KEY = '{{zeppelin-site/zeppelin.server.ssl.port}}'
+
+SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}'
+ZEPPELIN_KEYTAB_KEY = '{{zeppelin-site/zeppelin.server.kerberos.keytab}}'
+ZEPPELIN_PRINCIPAL_KEY = '{{zeppelin-site/zeppelin.server.kerberos.principal}}'
+ZEPPELIN_USER_KEY = '{{zeppelin-env/zeppelin_user}}'
+
+UI_SSL_ENABLED = '{{zeppelin-site/zeppelin.ssl}}'
+
+KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}'
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (ZEPPELIN_USER_KEY, UI_SSL_ENABLED, SECURITY_ENABLED_KEY, ZEPPELIN_KEYTAB_KEY, ZEPPELIN_PRINCIPAL_KEY,
+          KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, ZEPPELIN_PORT_KEY, ZEPPELIN_PORT_SSL_KEY)
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+
+  if configurations is None:
+    return (RESULT_CODE_UNKNOWN, ['There were no configurations supplied to the script.'])
+
+  zeppelin_user = configurations[ZEPPELIN_USER_KEY]
+
+  ui_ssl_enabled = False
+  if UI_SSL_ENABLED in configurations:
+    ui_ssl_enabled = str(configurations[UI_SSL_ENABLED]).upper() == 'TRUE'
+
+  zeppelin_port = 9995
+  if ui_ssl_enabled:
+    zeppelin_port = configurations[ZEPPELIN_PORT_SSL_KEY]
+  else:
+    zeppelin_port = configurations[ZEPPELIN_PORT_KEY]
+
+  security_enabled = False
+  if SECURITY_ENABLED_KEY in configurations:
+    security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE'
+
+  if host_name is None:
+    host_name = socket.getfqdn()
+
+  zeppelin_kerberos_keytab = None
+  if ZEPPELIN_KEYTAB_KEY in configurations:
+    zeppelin_kerberos_keytab = configurations[ZEPPELIN_KEYTAB_KEY]
+
+  zeppelin_principal = None
+  if ZEPPELIN_PRINCIPAL_KEY in configurations:
+    zeppelin_principal = configurations[ZEPPELIN_PRINCIPAL_KEY]
+    zeppelin_principal = zeppelin_principal.replace('_HOST',host_name.lower())
+
+  if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations:
+    kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY]
+  else:
+    kerberos_executable_search_paths = None
+
+  kinit_path_local = get_kinit_path(kerberos_executable_search_paths)
+
+  try:
+    if security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_principal}; ")
+      Execute(kinit_cmd, user=zeppelin_user)
+
+    scheme = "https" if ui_ssl_enabled else "http"
+    command = format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {scheme}://{host_name}:{zeppelin_port}/api/version | grep 200")
+    Execute(command, tries = 10, try_sleep=3, user=zeppelin_user, logoutput=True)
+  except ComponentIsNotRunning as ex:
+    return (RESULT_CODE_CRITICAL, [str(ex)])
+  except Exception as ex:
+    return (RESULT_CODE_CRITICAL, ["Zeppelin is not responding: " + str(ex)])
+
+  return (RESULT_CODE_OK, ["Successful connection to Zeppelin"])
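
For context: Ambari's alert framework calls get_tokens() to learn which {{site/property}} values to
look up, then passes the resolved values to execute() as the configurations dict. A minimal sketch
of driving the check by hand under that contract, with hypothetical values (on a real host this
runs inside the Ambari agent, which provides the resource_management and ambari_commons imports):

    # Minimal sketch: invoke the alert with a hand-built configurations
    # dict keyed by the same tokens get_tokens() returns.
    import alert_check_zeppelin as alert

    configurations = {
        '{{zeppelin-env/zeppelin_user}}': 'zeppelin',
        '{{zeppelin-site/zeppelin.ssl}}': 'false',
        '{{cluster-env/security_enabled}}': 'false',
        '{{zeppelin-site/zeppelin.server.port}}': '9995',
        '{{zeppelin-site/zeppelin.server.ssl.port}}': '9995',
    }

    code, messages = alert.execute(configurations=configurations)
    print(code, messages[0])
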
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/interpreter_json_template.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/interpreter_json_template.py
new file mode 100644
index 00000000..6a64d0c7
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/interpreter_json_template.py
@@ -0,0 +1,497 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+template = '''
+{
+  "interpreterSettings": {
+    "angular": {
+      "id": "angular",
+      "name": "angular",
+      "group": "angular",
+      "properties": {},
+      "status": "READY",
+      "interpreterGroup": [
+        {
+          "name": "angular",
+          "class": "org.apache.zeppelin.angular.AngularInterpreter",
+          "defaultInterpreter": false,
+          "editor": {
+            "editOnDblClick": true
+          }
+        }
+      ],
+      "dependencies": [],
+      "option": {
+        "remote": true,
+        "port": -1,
+        "perNote": "shared",
+        "perUser": "shared",
+        "isExistingProcess": false,
+        "setPermission": false,
+        "users": [],
+        "isUserImpersonate": false
+      }
+    },
+    "spark2": {
+      "id": "spark2",
+      "name": "spark2",
+      "group": "spark",
+      "properties": {
+        "spark.executor.memory": {
+          "type": "string", 
+          "name": "spark.executor.memory", 
+          "value": ""
+        },
+        "args": {
+          "type": "string", 
+          "name": "args", 
+          "value": ""
+        },
+        "zeppelin.spark.printREPLOutput": {
+          "type": "string", 
+          "name": "zeppelin.spark.printREPLOutput", 
+          "value": "true"
+        },
+        "spark.cores.max": {
+          "type": "string", 
+          "name": "spark.cores.max", 
+          "value": ""
+        },
+        "zeppelin.dep.additionalRemoteRepository": {
+          "type": "string", 
+          "name": "zeppelin.dep.additionalRemoteRepository", 
+          "value": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;"
+        },
+        "zeppelin.spark.importImplicit": {
+          "type": "string", 
+          "name": "zeppelin.spark.importImplicit", 
+          "value": "true"
+        },
+        "zeppelin.spark.sql.stacktrace": {
+          "type": "string", 
+          "name": "zeppelin.spark.sql.stacktrace", 
+          "value": "false"
+        },
+        "zeppelin.spark.concurrentSQL": {
+          "type": "string", 
+          "name": "zeppelin.spark.concurrentSQL", 
+          "value": "false"
+        },
+        "zeppelin.spark.useHiveContext": {
+          "type": "string", 
+          "name": "zeppelin.spark.useHiveContext", 
+          "value": "true"
+        },
+        "zeppelin.pyspark.python": {
+          "type": "string", 
+          "name": "zeppelin.pyspark.python", 
+          "value": "python"
+        },
+        "zeppelin.dep.localrepo": {
+          "type": "string", 
+          "name": "zeppelin.dep.localrepo", 
+          "value": "local-repo"
+        },
+        "zeppelin.R.knitr": {
+          "type": "string", 
+          "name": "zeppelin.R.knitr", 
+          "value": "true"
+        },        
+        "zeppelin.spark.maxResult": {
+          "type": "string", 
+          "name": "zeppelin.spark.maxResult", 
+          "value": "1000"
+        },
+        "master": {
+          "type": "string", 
+          "name": "master", 
+          "value": "local[*]"
+        },
+        "spark.app.name": {
+          "type": "string", 
+          "name": "spark.app.name", 
+          "value": "Zeppelin"
+        },
+        "zeppelin.R.image.width": {
+          "type": "string", 
+          "name": "zeppelin.R.image.width", 
+          "value": "100%"
+        },
+        "zeppelin.R.render.options": {
+          "type": "string", 
+          "name": "zeppelin.R.render.options", 
+          "value": "out.format \u003d \u0027html\u0027, comment \u003d NA, echo \u003d FALSE, results \u003d \u0027asis\u0027, message \u003d F, warning \u003d F"
+        },
+        "zeppelin.R.cmd": {
+          "type": "string", 
+          "name": "zeppelin.R.cmd", 
+          "value": "R"
+        }        
+      },
+      "status": "READY",
+      "interpreterGroup": [
+        {
+          "name": "spark",
+          "class": "org.apache.zeppelin.spark.SparkInterpreter",
+          "defaultInterpreter": true
+        },
+        {
+          "name": "sql",
+          "class": "org.apache.zeppelin.spark.SparkSqlInterpreter",
+          "defaultInterpreter": false
+        },
+        {
+          "name": "dep",
+          "class": "org.apache.zeppelin.spark.DepInterpreter",
+          "defaultInterpreter": false
+        },
+        {
+          "name": "pyspark",
+          "class": "org.apache.zeppelin.spark.PySparkInterpreter",
+          "defaultInterpreter": false
+        },
+        {
+          "name": "r",
+          "class": "org.apache.zeppelin.spark.SparkRInterpreter",
+          "defaultInterpreter": false
+        }
+      ],
+      "dependencies": [],
+      "option": {
+        "remote": true,
+        "port": -1,
+        "perNoteSession": false,
+        "perNoteProcess": false,
+        "isExistingProcess": false,
+        "setPermission": false
+      }
+    },
+    "jdbc": {
+      "id": "jdbc",
+      "name": "jdbc",
+      "group": "jdbc",
+      "properties": {
+        "default.password": {
+          "type": "string", 
+          "name": "default.password", 
+          "value": ""
+        },
+        "zeppelin.jdbc.auth.type": {
+          "type": "string", 
+          "name": "zeppelin.jdbc.auth.type", 
+          "value": ""
+        },
+        "common.max_count": {
+          "type": "string", 
+          "name": "common.max_count", 
+          "value": "1000"
+        },
+        "zeppelin.jdbc.principal": {
+          "type": "string", 
+          "name": "zeppelin.jdbc.principal", 
+          "value": ""
+        },
+        "default.user": {
+          "type": "string", 
+          "name": "default.user", 
+          "value": "gpadmin"
+        },
+        "default.url": {
+          "type": "string", 
+          "name": "default.url", 
+          "value": "jdbc:postgresql://localhost:5432/"
+        },
+        "default.driver": {
+          "type": "string", 
+          "name": "default.driver", 
+          "value": "org.postgresql.Driver"
+        },
+        "zeppelin.jdbc.keytab.location": {
+          "type": "string", 
+          "name": "zeppelin.jdbc.keytab.location", 
+          "value": ""
+        },
+        "zeppelin.jdbc.concurrent.use": {
+          "type": "string", 
+          "name": "zeppelin.jdbc.concurrent.use", 
+          "value": "true"
+        },
+        "zeppelin.jdbc.concurrent.max_connection": {
+          "type": "string", 
+          "name": "zeppelin.jdbc.concurrent.max_connection", 
+          "value": "10"
+        }        
+      },
+      "status": "READY",
+      "interpreterGroup": [
+        {
+          "name": "sql",
+          "class": "org.apache.zeppelin.jdbc.JDBCInterpreter",
+          "defaultInterpreter": false,
+          "editor": {
+            "language": "sql",
+            "editOnDblClick": false
+          }
+        }
+      ],
+      "dependencies": [],
+      "option": {
+        "remote": true,
+        "port": -1,
+        "perNote": "shared",
+        "perUser": "shared",
+        "isExistingProcess": false,
+        "setPermission": false,
+        "users": [],
+        "isUserImpersonate": false
+      }
+    },
+    "livy2": {
+      "id": "livy2",
+      "status": "READY",
+      "group": "livy",
+      "name": "livy2",
+      "properties": {
+        "zeppelin.livy.keytab": {
+          "type": "string", 
+          "name": "zeppelin.livy.keytab", 
+          "value": ""
+        },
+        "zeppelin.livy.spark.sql.maxResult": {
+          "type": "string", 
+          "name": "zeppelin.livy.spark.sql.maxResult", 
+          "value": "1000"
+        },
+        "livy.spark.executor.instances": {
+          "type": "string", 
+          "name": "livy.spark.executor.instances", 
+          "value": ""
+        },
+        "livy.spark.executor.memory": {
+          "type": "string", 
+          "name": "livy.spark.executor.memory", 
+          "value": ""
+        },
+        "livy.spark.dynamicAllocation.enabled": {
+          "type": "string", 
+          "name": "livy.spark.dynamicAllocation.enabled", 
+          "value": ""
+        },
+        "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout": {
+          "type": "string", 
+          "name": "livy.spark.dynamicAllocation.cachedExecutorIdleTimeout", 
+          "value": ""
+        },
+        "livy.spark.dynamicAllocation.initialExecutors": {
+          "type": "string", 
+          "name": "livy.spark.dynamicAllocation.initialExecutors", 
+          "value": ""
+        },
+        "zeppelin.livy.session.create_timeout": {
+          "type": "string", 
+          "name": "zeppelin.livy.session.create_timeout", 
+          "value": "120"
+        },
+        "livy.spark.driver.memory": {
+          "type": "string", 
+          "name": "livy.spark.driver.memory", 
+          "value": ""
+        },
+        "zeppelin.livy.displayAppInfo": {
+          "type": "string", 
+          "name": "zeppelin.livy.displayAppInfo", 
+          "value": "true"
+        },
+        "livy.spark.jars.packages": {
+          "type": "string", 
+          "name": "livy.spark.jars.packages", 
+          "value": ""
+        },
+        "livy.spark.dynamicAllocation.maxExecutors": {
+          "type": "string", 
+          "name": "livy.spark.dynamicAllocation.maxExecutors", 
+          "value": ""
+        },
+        "zeppelin.livy.concurrentSQL": {
+          "type": "string", 
+          "name": "zeppelin.livy.concurrentSQL", 
+          "value": "false"
+        },
+        "zeppelin.livy.principal": {
+          "type": "string", 
+          "name": "zeppelin.livy.principal", 
+          "value": ""
+        },
+        "livy.spark.executor.cores": {
+          "type": "string", 
+          "name": "livy.spark.executor.cores", 
+          "value": ""
+        },
+        "zeppelin.livy.url": {
+          "type": "string", 
+          "name": "zeppelin.livy.url", 
+          "value": "http://localhost:8998"
+        },
+        "zeppelin.livy.pull_status.interval.millis": {
+          "type": "string", 
+          "name": "zeppelin.livy.pull_status.interval.millis", 
+          "value": "1000"
+        },
+        "livy.spark.driver.cores": {
+          "type": "string", 
+          "name": "livy.spark.driver.cores", 
+          "value": ""
+        },
+        "livy.spark.dynamicAllocation.minExecutors": {
+          "type": "string", 
+          "name": "livy.spark.dynamicAllocation.minExecutors", 
+          "value": ""
+        }
+      },
+      "interpreterGroup": [
+        {
+          "class": "org.apache.zeppelin.livy.LivySparkInterpreter",
+          "editor": {
+            "editOnDblClick": false,
+            "language": "scala"
+          },
+          "name": "spark",
+          "defaultInterpreter": false
+        },
+        {
+          "class": "org.apache.zeppelin.livy.LivySparkSQLInterpreter",
+          "editor": {
+            "editOnDblClick": false,
+            "language": "sql"
+          },
+          "name": "sql",
+          "defaultInterpreter": false
+        },
+        {
+          "class": "org.apache.zeppelin.livy.LivyPySparkInterpreter",
+          "editor": {
+            "editOnDblClick": false,
+            "language": "python"
+          },
+          "name": "pyspark",
+          "defaultInterpreter": false
+        },
+        {
+          "class": "org.apache.zeppelin.livy.LivySparkRInterpreter",
+          "editor": {
+            "editOnDblClick": false,
+            "language": "r"
+          },
+          "name": "sparkr",
+          "defaultInterpreter": false
+        },
+        {
+          "name": "shared",
+          "class": "org.apache.zeppelin.livy.LivySharedInterpreter",
+          "defaultInterpreter": false
+        }
+      ],
+      "dependencies": [],
+      "option": {
+        "setPermission": false,
+        "remote": true,
+        "users": [],
+        "isExistingProcess": false,
+        "perUser": "scoped",
+        "isUserImpersonate": false,
+        "perNote": "shared",
+        "port": -1
+      }
+    },
+    "md": {
+      "id": "md",
+      "name": "md",
+      "group": "md",
+      "properties": {
+        "markdown.parser.type": {
+          "type": "string",
+          "name": "markdown.parser.type",
+          "value": "markdown4j"
+        }
+      },
+      "status": "READY",
+      "interpreterGroup": [
+        {
+          "name": "md",
+          "class": "org.apache.zeppelin.markdown.Markdown",
+          "defaultInterpreter": false,
+          "editor": {
+            "language": "markdown",
+            "editOnDblClick": true
+          }
+        }
+      ],
+      "dependencies": [],
+      "option": {
+        "remote": true,
+        "port": -1,
+        "perNote": "shared",
+        "perUser": "shared",
+        "isExistingProcess": false,
+        "setPermission": false,
+        "users": [],
+        "isUserImpersonate": false
+      }
+    }
+  },
+  "interpreterBindings": {},
+  "interpreterRepositories": [
+    {
+      "id": "central",
+      "type": "default",
+      "url": "http://repo1.maven.org/maven2/",
+      "releasePolicy": {
+        "enabled": true,
+        "updatePolicy": "daily",
+        "checksumPolicy": "warn"
+      },
+      "snapshotPolicy": {
+        "enabled": true,
+        "updatePolicy": "daily",
+        "checksumPolicy": "warn"
+      },
+      "mirroredRepositories": [],
+      "repositoryManager": false
+    },
+    {
+      "id": "local",
+      "type": "default",
+      "url": "file:///home/zeppelin/.m2/repository",
+      "releasePolicy": {
+        "enabled": true,
+        "updatePolicy": "daily",
+        "checksumPolicy": "warn"
+      },
+      "snapshotPolicy": {
+        "enabled": true,
+        "updatePolicy": "daily",
+        "checksumPolicy": "warn"
+      },
+      "mirroredRepositories": [],
+      "repositoryManager": false
+    }
+  ]
+}
+'''
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/master.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/master.py
new file mode 100644
index 00000000..619d4e08
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/master.py
@@ -0,0 +1,677 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import glob
+import os
+
+from resource_management.core import shell, sudo
+from resource_management.core.logger import Logger
+from resource_management.core.resources import Directory
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import Template, InlineTemplate
+from resource_management.libraries import XmlConfig
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.check_process_status import \
+  check_process_status
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.generate_logfeeder_input_config import generate_logfeeder_input_config
+from resource_management.libraries.functions.stack_features import \
+  check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.script.script import Script
+
+
+class Master(Script):
+  def install(self, env):
+    import params
+    env.set_params(params)
+    self.install_packages(env)
+
+    self.create_zeppelin_log_dir(env)
+
+    if params.spark_version:
+      Execute('echo spark_version:' + str(params.spark_version) + ' detected for spark_home: '
+              + params.spark_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+    if params.spark2_version:
+      Execute('echo spark2_version:' + str(params.spark2_version) + ' detected for spark2_home: '
+              + params.spark2_home + ' >> ' + params.zeppelin_log_file, user=params.zeppelin_user)
+
+  def create_zeppelin_dir(self, params):
+    params.HdfsResource(format("/user/{zeppelin_user}"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+    params.HdfsResource(format("/user/{zeppelin_user}/test"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+    params.HdfsResource(format("/apps/zeppelin"),
+                        type="directory",
+                        action="create_on_execute",
+                        owner=params.zeppelin_user,
+                        recursive_chown=True,
+                        recursive_chmod=True
+                        )
+
+
+    params.HdfsResource(None, action="execute")
+
+  def create_zeppelin_log_dir(self, env):
+    import params
+    env.set_params(params)
+    Directory([params.zeppelin_log_dir],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              cd_access="a",
+              create_parents=True,
+              mode=0755
+              )
+
+  def create_zeppelin_hdfs_conf_dir(self, env):
+    import params
+    env.set_params(params)
+    Directory([params.external_dependency_conf],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              cd_access="a",
+              create_parents=True,
+              mode=0755
+              )
+
+  def chown_zeppelin_pid_dir(self, env):
+    import params
+    env.set_params(params)
+    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), params.zeppelin_pid_dir),
+            sudo=True)
+
+  def configure(self, env):
+    import params
+    import status_params
+    env.set_params(params)
+    env.set_params(status_params)
+    self.create_zeppelin_log_dir(env)
+
+    # create the pid and zeppelin dirs
+    Directory([params.zeppelin_pid_dir, params.zeppelin_dir],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              cd_access="a",
+              create_parents=True,
+              mode=0755
+    )
+    self.chown_zeppelin_pid_dir(env)
+
+    XmlConfig("zeppelin-site.xml",
+              conf_dir=params.conf_dir,
+              configurations=params.config['configurations']['zeppelin-site'],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group
+              )
+    # write out zeppelin-env.sh
+    env_content = InlineTemplate(params.zeppelin_env_content)
+    File(format("{params.conf_dir}/zeppelin-env.sh"), content=env_content,
+         owner=params.zeppelin_user, group=params.zeppelin_group)
+
+    # write out shiro.ini
+    shiro_ini_content = InlineTemplate(params.shiro_ini_content)
+    File(format("{params.conf_dir}/shiro.ini"), content=shiro_ini_content,
+         owner=params.zeppelin_user, group=params.zeppelin_group)
+
+    # write out log4j.properties
+    File(format("{params.conf_dir}/log4j.properties"), content=params.log4j_properties_content,
+         owner=params.zeppelin_user, group=params.zeppelin_group)
+
+    self.create_zeppelin_hdfs_conf_dir(env)
+
+    generate_logfeeder_input_config('zeppelin', Template("input.config-zeppelin.json.j2", extra_imports=[default]))
+
+    if len(params.hbase_master_hosts) > 0 and params.is_hbase_installed:
+      # copy hbase-site.xml
+      XmlConfig("hbase-site.xml",
+              conf_dir=params.external_dependency_conf,
+              configurations=params.config['configurations']['hbase-site'],
+              configuration_attributes=params.config['configurationAttributes']['hbase-site'],
+              owner=params.zeppelin_user,
+              group=params.zeppelin_group,
+              mode=0644)
+
+      XmlConfig("hdfs-site.xml",
+                conf_dir=params.external_dependency_conf,
+                configurations=params.config['configurations']['hdfs-site'],
+                configuration_attributes=params.config['configurationAttributes']['hdfs-site'],
+                owner=params.zeppelin_user,
+                group=params.zeppelin_group,
+                mode=0644)
+
+      XmlConfig("core-site.xml",
+                conf_dir=params.external_dependency_conf,
+                configurations=params.config['configurations']['core-site'],
+                configuration_attributes=params.config['configurationAttributes']['core-site'],
+                owner=params.zeppelin_user,
+                group=params.zeppelin_group,
+                mode=0644,
+                xml_include_file=params.mount_table_xml_inclusion_file_full_path)
+
+      if params.mount_table_content:
+        File(params.mount_table_xml_inclusion_file_full_path,
+             owner=params.zeppelin_user,
+             group=params.zeppelin_group,
+             content=params.mount_table_content,
+             mode=0644
+        )
+
+  def check_and_copy_notebook_in_hdfs(self, params):
+    notebook_dir = params.zeppelin_notebook_dir
+    if notebook_dir.startswith("/") or '://' in notebook_dir:
+      notebook_directory = notebook_dir
+    else:
+      notebook_directory = "/user/" + format("{zeppelin_user}") + "/" + notebook_dir
+
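+    # on first start, create the HDFS notebook directory and seed it with the
+    # notebooks shipped in the local notebook dir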
+    if not self.is_directory_exists_in_HDFS(notebook_directory, params.zeppelin_user):
+      params.HdfsResource(format("{notebook_directory}"),
+                          type="directory",
+                          action="create_on_execute",
+                          owner=params.zeppelin_user,
+                          recursive_chown=True,
+                          recursive_chmod=True
+                          )
+
+      params.HdfsResource(format("{notebook_directory}"),
+                          type="directory",
+                          action="create_on_execute",
+                          source=params.local_notebook_dir,
+                          owner=params.zeppelin_user,
+                          recursive_chown=True,
+                          recursive_chmod=True
+                          )
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    self.create_zeppelin_log_dir(env)
+    self.chown_zeppelin_pid_dir(env)
+    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh stop >> ' + params.zeppelin_log_file,
+            user=params.zeppelin_user)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    import status_params
+    self.configure(env)
+
+    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), "/etc/zeppelin"),
+            sudo=True)
+    Execute(("chown", "-R", format("{zeppelin_user}") + ":" + format("{zeppelin_group}"), format("{local_notebook_dir}")), sudo=True)
+
+    if params.security_enabled:
+      zeppelin_kinit_cmd = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal}; ")
+      Execute(zeppelin_kinit_cmd, user=params.zeppelin_user)
+
+    if 'zeppelin.notebook.storage' in params.config['configurations']['zeppelin-site'] \
+        and params.config['configurations']['zeppelin-site']['zeppelin.notebook.storage'] == 'org.apache.zeppelin.notebook.repo.FileSystemNotebookRepo':
+      self.check_and_copy_notebook_in_hdfs(params)
+
+    zeppelin_spark_dependencies = self.get_zeppelin_spark_dependencies()
+    if zeppelin_spark_dependencies and os.path.exists(zeppelin_spark_dependencies[0]):
+      self.create_zeppelin_dir(params)
+
+    if params.conf_stored_in_hdfs:
+      if not self.is_directory_exists_in_HDFS(self.get_zeppelin_conf_FS_directory(params), params.zeppelin_user):
+        # hdfs dfs -mkdir {zeppelin's conf directory}
+        params.HdfsResource(self.get_zeppelin_conf_FS_directory(params),
+                            type="directory",
+                            action="create_on_execute",
+                            owner=params.zeppelin_user,
+                            recursive_chown=True,
+                            recursive_chmod=True
+                            )
+
+    # first setup: only create interpreter.json if it does not exist yet
+    if not os.path.exists(params.conf_dir + "/interpreter.json"):
+      self.create_interpreter_json()
+
+    if params.zeppelin_interpreter_config_upgrade == True:
+      self.reset_interpreter_settings(upgrade_type)
+      self.update_zeppelin_interpreter()
+
+    Execute(params.zeppelin_dir + '/bin/zeppelin-daemon.sh restart >> '
+            + params.zeppelin_log_file, user=params.zeppelin_user)
+    pidfile = glob.glob(os.path.join(status_params.zeppelin_pid_dir,
+                                     'zeppelin-' + params.zeppelin_user + '*.pid'))[0]
+    Logger.info(format("Pid file is: {pidfile}"))
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    try:
+        pid_file = glob.glob(status_params.zeppelin_pid_dir + '/zeppelin-' +
+                             status_params.zeppelin_user + '*.pid')[0]
+    except IndexError:
+        pid_file = ''
+    check_process_status(pid_file)
+
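+  # merge the bundled interpreter template into the live interpreter.json:
+  # interpreters, groups, properties and options present only in the template
+  # are added; values already in the user's config are left untouched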
+  def reset_interpreter_settings(self, upgrade_type):
+    import json
+    import interpreter_json_template
+    template_settings = json.loads(interpreter_json_template.template)['interpreterSettings']
+    config_data = self.get_interpreter_settings()
+    interpreter_settings = config_data['interpreterSettings']
+
+    if upgrade_type is not None:
+      current_interpreters_keys = interpreter_settings.keys()
+      for key in current_interpreters_keys:
+        interpreter_data = interpreter_settings[key]
+        if interpreter_data["name"] == "sh" and interpreter_data["group"] == "sh":
+          del interpreter_settings[key]
+
+    for setting_key in template_settings.keys():
+      if setting_key not in interpreter_settings:
+        interpreter_settings[setting_key] = template_settings[setting_key]
+      else:
+        templateGroups = template_settings[setting_key]['interpreterGroup']
+        groups = interpreter_settings[setting_key]['interpreterGroup']
+
+        templateProperties = template_settings[setting_key]['properties']
+        properties = interpreter_settings[setting_key]['properties']
+
+        templateOptions = template_settings[setting_key]['option']
+        options = interpreter_settings[setting_key]['option']
+
+        # search for difference in groups from current interpreter and template interpreter
+        # if any group exists in template but doesn't exist in current interpreter, it will be added
+        group_names = []
+        for group in groups:
+          group_names.append(group['name'])
+
+        for template_group in templateGroups:
+          if not template_group['name'] in group_names:
+            groups.append(template_group)
+
+        # search for difference in properties from current interpreter and template interpreter
+        # if any property exists in template but doesn't exist in current interpreter, it will be added
+        for template_property in templateProperties:
+          if not template_property in properties:
+            properties[template_property] = templateProperties[template_property]
+
+        # search for difference in options from current interpreter and template interpreter
+        # if any option exists in template but doesn't exist in current interpreter, it will be added
+        for template_option in templateOptions:
+          if not template_option in options:
+            options[template_option] = templateOptions[template_option]
+
+    self.set_interpreter_settings(config_data)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, format_stack_version(params.version)):
+      stack_select.select_packages(params.version)
+
+  def get_zeppelin_conf_FS_directory(self, params):
+    hdfs_interpreter_config = params.config['configurations']['zeppelin-site']['zeppelin.config.fs.dir']
+
+    # if it doesn't start with "/" or contain "://" (as in hdfs://, file://, etc.), make it an absolute path
+    if not (hdfs_interpreter_config.startswith("/") or '://' in hdfs_interpreter_config):
+      hdfs_interpreter_config = "/user/" + format("{zeppelin_user}") + "/" + hdfs_interpreter_config
+
+    return hdfs_interpreter_config
+
+  def get_zeppelin_conf_FS(self, params):
+    return self.get_zeppelin_conf_FS_directory(params) + "/interpreter.json"
+
+  def is_directory_exists_in_HDFS(self, path, as_user):
+    import params
+    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+    if params.security_enabled:
+      kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+    else:
+      kinit_if_needed = ''
+    #-d: if the path is a directory, return 0.
+    path_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -d {path};echo $?"),
+                             user=as_user)[1]
+
+    # if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
+    if "\n" in path_exists:
+      path_exists = path_exists.split("\n").pop()
+
+    # '0' means the directory exists; '1' means it does not exist
+    return path_exists == '0'
+
+  def is_file_exists_in_HDFS(self, path, as_user):
+    import params
+    kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+    if params.security_enabled:
+      kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+    else:
+      kinit_if_needed = ''
+
+    #-f: if the path is a file, return 0.
+    path_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -f {path};echo $?"),
+                             user=as_user)[1]
+
+    # if there is no kerberos setup then the string will contain "-bash: kinit: command not found"
+    if "\n" in path_exists:
+      path_exists = path_exists.split("\n").pop()
+
+    # '1' means the file does not exist
+    if path_exists == '0':
+      #-z: if the file is zero length, return 0.
+      path_exists = shell.call(format("{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -test -z {path};echo $?"),
+                               user=as_user)[1]
+
+      if "\n" in path_exists:
+        path_exists = path_exists.split("\n").pop()
+      if path_exists != '0':
+        return True
+
+    return False
+
+  def copy_interpreter_from_HDFS_to_FS(self, params):
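+    # when interpreter.json is stored in HDFS, pull it down into the local
+    # conf dir; returns True only if a remote copy existed and was fetched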
+    if params.conf_stored_in_hdfs:
+      zeppelin_conf_fs = self.get_zeppelin_conf_FS(params)
+
+      if self.is_file_exists_in_HDFS(zeppelin_conf_fs, params.zeppelin_user):
+        # copy from hdfs to /etc/zeppelin/conf/interpreter.json
+        kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+        if params.security_enabled:
+          kinit_if_needed = format("{kinit_path_local} -kt {zeppelin_kerberos_keytab} {zeppelin_kerberos_principal};")
+        else:
+          kinit_if_needed = ''
+        interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+        shell.call(format("rm {interpreter_config};"
+            "{kinit_if_needed} hdfs --config {hadoop_conf_dir} dfs -get {zeppelin_conf_fs} {interpreter_config}"),
+            user=params.zeppelin_user)
+        return True
+    return False
+
+  def get_interpreter_settings(self):
+    import params
+    import json
+
+    self.copy_interpreter_from_HDFS_to_FS(params)
+    interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+    config_content = sudo.read_file(interpreter_config)
+    config_data = json.loads(config_content)
+    return config_data
+
+  def set_interpreter_settings(self, config_data):
+    import params
+    import json
+
+    interpreter_config = os.path.join(params.conf_dir, "interpreter.json")
+    File(interpreter_config,
+         group=params.zeppelin_group,
+         owner=params.zeppelin_user,
+         mode=0644,
+         content=json.dumps(config_data, indent=2))
+
+    if params.conf_stored_in_hdfs:
+      #delete file from HDFS, as the `replace_existing_files` logic checks length of file which can remain same.
+      params.HdfsResource(self.get_zeppelin_conf_FS(params),
+                          type="file",
+                          action="delete_on_execute")
+
+      #recreate file in HDFS from LocalFS
+      params.HdfsResource(self.get_zeppelin_conf_FS(params),
+                          type="file",
+                          action="create_on_execute",
+                          source=interpreter_config,
+                          owner=params.zeppelin_user,
+                          recursive_chown=True,
+                          recursive_chmod=True,
+                          replace_existing_files=True)
+
+  def update_kerberos_properties(self):
+    import params
+    config_data = self.get_interpreter_settings()
+    interpreter_settings = config_data['interpreterSettings']
+    for interpreter_setting in interpreter_settings:
+      interpreter = interpreter_settings[interpreter_setting]
+      if interpreter['group'] == 'livy':
+        if params.security_enabled and params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab:
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.livy.principal', 'string', params.zeppelin_kerberos_principal)
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.livy.keytab', 'string', params.zeppelin_kerberos_keytab)
+        else:
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.livy.principal', 'string', "")
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.livy.keytab', 'string', "")
+      elif interpreter['group'] == 'spark':
+        if params.security_enabled and params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab:
+          self.storePropertyToInterpreter(interpreter, 'spark.yarn.principal', 'string', params.zeppelin_kerberos_principal)
+          self.storePropertyToInterpreter(interpreter, 'spark.yarn.keytab', 'string', params.zeppelin_kerberos_keytab)
+        else:
+          self.storePropertyToInterpreter(interpreter, 'spark.yarn.principal', 'string', "")
+          self.storePropertyToInterpreter(interpreter, 'spark.yarn.keytab', 'string', "")
+      elif interpreter['group'] == 'jdbc':
+        if params.security_enabled and params.zeppelin_kerberos_principal and params.zeppelin_kerberos_keytab:
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.jdbc.auth.type', 'string', "KERBEROS")
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.jdbc.principal', 'string', params.zeppelin_kerberos_principal)
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.jdbc.keytab.location', 'string', params.zeppelin_kerberos_keytab)
+          if params.zookeeper_znode_parent \
+              and params.hbase_zookeeper_quorum \
+              and 'phoenix.url' in interpreter['properties'] \
+              and 'value' in interpreter['properties']['phoenix.url'] \
+              and params.zookeeper_znode_parent not in interpreter['properties']['phoenix.url']['value']:
+            self.storePropertyToInterpreter(interpreter, 'phoenix.url', 'string', "jdbc:phoenix:" + \
+                                                                                  params.hbase_zookeeper_quorum + ':' + \
+                                                                                  params.zookeeper_znode_parent)
+        else:
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.jdbc.auth.type', 'string', "SIMPLE")
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.jdbc.principal', 'string', "")
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.jdbc.keytab.location', 'string', "")
+
+    self.set_interpreter_settings(config_data)
+
+  def update_zeppelin_interpreter(self):
+    import params
+    config_data = self.get_interpreter_settings()
+    interpreter_settings = config_data['interpreterSettings']
+
+    exclude_interpreter_autoconfig_list = []
+    exclude_interpreter_property_groups_map = {}
+
+    if params.exclude_interpreter_autoconfig:
+      excluded_interpreters = params.exclude_interpreter_autoconfig.strip().split(";")
+      for interpreter in excluded_interpreters:
+        if interpreter and interpreter.strip():
+          splitted_line = interpreter.split('(')
+          interpreter_name = splitted_line[0].strip()
+          exclude_interpreter_autoconfig_list.append(interpreter_name)
+          if len(splitted_line) > 1:
+            property_groups_list = splitted_line[1].replace(')','').strip().split(',')
+            if len(property_groups_list) > 0 and property_groups_list[0]:
+              exclude_interpreter_property_groups_map[interpreter_name] = property_groups_list
+
+    if params.zeppelin_interpreter:
+      settings_to_delete = []
+      for settings_key, interpreter in interpreter_settings.items():
+        if interpreter['group'] not in params.zeppelin_interpreter:
+          settings_to_delete.append(settings_key)
+
+      for key in settings_to_delete:
+        del interpreter_settings[key]
+
+    hive_interactive_properties_key = 'hive_interactive'
+    for setting_key in interpreter_settings.keys():
+      interpreter = interpreter_settings[setting_key]
+      if interpreter['group'] == 'jdbc' and interpreter['name'] == 'jdbc' and ('jdbc' not in exclude_interpreter_autoconfig_list
+                                                               or 'jdbc' in exclude_interpreter_property_groups_map.keys()):
+        interpreter['dependencies'] = []
+        jdbc_property_groups = []
+        if 'jdbc' in exclude_interpreter_property_groups_map.keys():
+          jdbc_property_groups = exclude_interpreter_property_groups_map.get('jdbc')
+        if not params.hive_server_host and params.hive_server_interactive_hosts:
+          hive_interactive_properties_key = 'hive'
+
+        if params.hive_server_host and 'hive-server' not in jdbc_property_groups:
+          self.storePropertyToInterpreter(interpreter, 'hive.driver', 'string', 'org.apache.hive.jdbc.HiveDriver')
+          self.storePropertyToInterpreter(interpreter, 'hive.user', 'string', 'hive')
+          self.storePropertyToInterpreter(interpreter, 'hive.password', 'string', '')
+          self.storePropertyToInterpreter(interpreter, 'hive.proxy.user.property', 'string', 'hive.server2.proxy.user')
+          if params.hive_server2_support_dynamic_service_discovery:
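+            # with dynamic service discovery, point the JDBC URL at the
+            # ZooKeeper quorum rather than a single HiveServer2 host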
+            self.storePropertyToInterpreter(interpreter, 'hive.url', 'string', 'jdbc:hive2://' + \
+                                                 params.hive_zookeeper_quorum + \
+                                                 '/;' + 'serviceDiscoveryMode=' + params.discovery_mode + ';zooKeeperNamespace=' + \
+                                                 params.hive_zookeeper_namespace)
+          else:
+            self.storePropertyToInterpreter(interpreter, 'hive.url', 'string', 'jdbc:hive2://' + \
+                                                                               params.hive_server_host + \
+                                                                               ':' + params.hive_server_port)
+          if 'hive.splitQueries' not in interpreter['properties']:
+            self.storePropertyToInterpreter(interpreter, "hive.splitQueries", 'string', "true")
+
+        if params.hive_server_interactive_hosts and 'hive-interactive' not in jdbc_property_groups:
+          self.storePropertyToInterpreter(interpreter, hive_interactive_properties_key + '.driver', 'string', 'org.apache.hive.jdbc.HiveDriver')
+          self.storePropertyToInterpreter(interpreter, hive_interactive_properties_key + '.user', 'string', 'hive')
+          self.storePropertyToInterpreter(interpreter, hive_interactive_properties_key + '.password', 'string', '')
+          self.storePropertyToInterpreter(interpreter, hive_interactive_properties_key + '.proxy.user.property', 'string', 'hive.server2.proxy.user')
+          if params.hive_server2_support_dynamic_service_discovery:
+            self.storePropertyToInterpreter(interpreter, hive_interactive_properties_key + '.url', 'string', 'jdbc:hive2://' + \
+                                                    params.hive_zookeeper_quorum + \
+                                                    '/;' + 'serviceDiscoveryMode=' + params.discovery_mode + ';zooKeeperNamespace=' + \
+                                                    params.hive_interactive_zookeeper_namespace)
+          else:
+            self.storePropertyToInterpreter(interpreter, hive_interactive_properties_key + '.url', 'string', 'jdbc:hive2://' + \
+                                                    params.hive_server_interactive_hosts + \
+                                                    ':' + params.hive_server_port)
+          if hive_interactive_properties_key + '.splitQueries' not in interpreter['properties']:
+            self.storePropertyToInterpreter(interpreter, hive_interactive_properties_key + '.splitQueries', 'string', "true")
+
+        if params.spark2_thrift_server_hosts and 'spark2' not in jdbc_property_groups:
+          self.storePropertyToInterpreter(interpreter, 'spark2.driver', 'string', 'org.apache.spark-project.org.apache.hive.jdbc.HiveDriver')
+          self.storePropertyToInterpreter(interpreter, 'spark2.user', 'string', 'hive')
+          self.storePropertyToInterpreter(interpreter, 'spark2.password', 'string', '')
+          self.storePropertyToInterpreter(interpreter, 'spark2.proxy.user.property', 'string', 'hive.server2.proxy.user')
+          self.storePropertyToInterpreter(interpreter, 'spark2.url', 'string', 'jdbc:hive2://' + \
+                          params.spark2_thrift_server_hosts + ':' + params.spark2_hive_thrift_port + '/')
+
+          if params.spark2_hive_principal:
+            self.storePropertyToInterpreter(interpreter, 'spark2.url', 'string', ';principal=' + params.spark2_hive_principal, 'add')
+          if params.spark2_transport_mode:
+            self.storePropertyToInterpreter(interpreter, 'spark2.url', 'string', ';transportMode=' + params.spark2_transport_mode, 'add')
+          if params.spark2_http_path:
+            self.storePropertyToInterpreter(interpreter, 'spark2.url', 'string', ';httpPath=' + params.spark2_http_path, 'add')
+          if params.spark2_ssl:
+            self.storePropertyToInterpreter(interpreter, 'spark2.url', 'string', ';ssl=true', 'add')
+          if 'spark2.splitQueries' not in interpreter['properties']:
+            self.storePropertyToInterpreter(interpreter, 'spark2.splitQueries', 'string', "true")
+
+        if params.zookeeper_znode_parent \
+                and params.hbase_zookeeper_quorum and 'hbase' not in jdbc_property_groups:
+            self.storePropertyToInterpreter(interpreter, 'phoenix.driver', 'string', 'org.apache.phoenix.jdbc.PhoenixDriver')
+            if 'phoenix.hbase.client.retries.number' not in interpreter['properties']:
+              self.storePropertyToInterpreter(interpreter, 'phoenix.hbase.client.retries.number', 'string', '1')
+            if 'phoenix.phoenix.query.numberFormat' not in interpreter['properties']:
+              self.storePropertyToInterpreter(interpreter, 'phoenix.phoenix.query.numberFormat', 'string', '#.#')
+            if 'phoenix.user' not in interpreter['properties']:
+              self.storePropertyToInterpreter(interpreter, 'phoenix.user', 'string', 'phoenixuser')
+            if 'phoenix.password' not in interpreter['properties']:
+              self.storePropertyToInterpreter(interpreter, 'phoenix.password', 'string', "")
+            self.storePropertyToInterpreter(interpreter, 'phoenix.url', 'string', "jdbc:phoenix:" + \
+                                                                                  params.hbase_zookeeper_quorum + ':' + \
+                                                                                  params.zookeeper_znode_parent)
+
+            if 'phoenix.splitQueries' not in interpreter['properties']:
+              self.storePropertyToInterpreter(interpreter, 'phoenix.splitQueries', 'string', "true")
+
+      elif interpreter['group'] == 'livy' and interpreter['name'] == 'livy2' and 'livy2' not in exclude_interpreter_autoconfig_list:
+        # Honor this Zeppelin setting if it exists
+        if 'zeppelin.livy.url' in params.config['configurations']['zeppelin-site']:
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.livy.url', 'string',
+                                          params.config['configurations']['zeppelin-site']['zeppelin.livy.url'])
+        elif params.livy2_livyserver_host:
+          self.storePropertyToInterpreter(interpreter, 'zeppelin.livy.url', 'string', params.livy2_livyserver_protocol + \
+                                                                                      "://" + params.livy2_livyserver_host + \
+                                                                                      ":" + params.livy2_livyserver_port)
+        else:
+          del interpreter_settings[setting_key]
+
+      elif interpreter['group'] == 'spark' and interpreter['name'] == 'spark2' and 'spark2' not in exclude_interpreter_autoconfig_list:
+        if 'spark2-env' in params.config['configurations']:
+          self.storePropertyToInterpreter(interpreter, 'master', 'string', "yarn-client")
+          self.storePropertyToInterpreter(interpreter, 'SPARK_HOME', 'string', "/usr/hdp/current/spark2-client/")
+        else:
+          del interpreter_settings[setting_key]
+
+    self.set_interpreter_settings(config_data)
+    self.update_kerberos_properties()
+
+  def storePropertyToInterpreter(self, interpreter, property_name, property_type, property_value, mode='set'):
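+    # mode 'set' overwrites an existing value, 'add' appends to it (used when
+    # building up JDBC URLs); a missing property is created in either mode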
+    if property_name in interpreter['properties'] and 'value' in interpreter['properties'][property_name]:
+      if mode == 'set':
+        interpreter['properties'][property_name]['value'] = property_value
+      elif mode == 'add':
+        interpreter['properties'][property_name]['value'] += property_value
+    else:
+      interpreter['properties'][property_name] = {'name' : property_name, 'type' : property_type, 'value' : property_value}
+
+  def create_interpreter_json(self):
+    import interpreter_json_template
+    import params
+
+    if not self.copy_interpreter_from_HDFS_to_FS(params):
+      interpreter_json = interpreter_json_template.template
+      File(format("{params.conf_dir}/interpreter.json"),
+           content=interpreter_json,
+           owner=params.zeppelin_user,
+           group=params.zeppelin_group,
+           mode=0664)
+
+      if params.conf_stored_in_hdfs:
+        params.HdfsResource(self.get_zeppelin_conf_FS(params),
+                            type="file",
+                            action="create_on_execute",
+                            source=format("{params.conf_dir}/interpreter.json"),
+                            owner=params.zeppelin_user,
+                            recursive_chown=True,
+                            recursive_chmod=True,
+                            replace_existing_files=True)
+
+  def get_zeppelin_spark_dependencies(self):
+    import params
+    return glob.glob(params.zeppelin_dir + '/interpreter/spark/dep/zeppelin-spark-dependencies*.jar')
+
+if __name__ == "__main__":
+  Master().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/params.py
new file mode 100644
index 00000000..051e3e73
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/params.py
@@ -0,0 +1,318 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import functools
+import os
+import re
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.version import format_stack_version, get_major_version
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.script.script import Script
+
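+# e.g. get_port_from_url('thrift://metastore-host:9083') -> '9083'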
+def get_port_from_url(address):
+  if address is not None:
+    return address.split(':')[-1]
+  return address
+
+def extract_spark_version(spark_home):
+  try:
+    with open(spark_home + "/RELEASE") as fline:
+      return re.search('Spark (\d\.\d).+', fline.readline().rstrip()).group(1)
+  except (IOError, AttributeError):
+    # RELEASE file missing, unreadable, or its first line doesn't match "Spark x.y"
+    pass
+  return None
+
+
+# server configurations
+config = Script.get_config()
+# stack_root = Script.get_stack_root()
+stack_root = "/usr/lib"
+
+# e.g. /var/lib/ambari-agent/cache/stacks/HDP/2.2/services/zeppelin-stack/package
+service_packagedir = os.path.realpath(__file__).split('/scripts')[0]
+
+zeppelin_dirname = 'zeppelin-server'
+
+install_dir = os.path.join(stack_root, "zeppelin")
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+ui_ssl_enabled = config['configurations']['zeppelin-site']['zeppelin.ssl']
+is_ui_ssl_enabled = str(ui_ssl_enabled).upper() == 'TRUE'
+
+setup_view = True
+temp_file = config['configurations']['zeppelin-env']['zeppelin.temp.file']
+
+spark_home = config['configurations']['zeppelin-env']['spark_home']
+spark_version = None
+spark2_home = ""
+spark2_version = None
+if 'spark-defaults' in config['configurations']:
+  spark_home = os.path.join(stack_root, "current", 'spark-client')
+  spark_version = extract_spark_version(spark_home)
+if 'spark2-defaults' in config['configurations']:
+  spark2_home = os.path.join(stack_root, "current", 'spark2-client')
+  spark2_version = extract_spark_version(spark2_home)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/clusterLevelParams/stack_name", None)
+
+# params from zeppelin-site
+zeppelin_port = str(config['configurations']['zeppelin-site']['zeppelin.server.port'])
+if is_ui_ssl_enabled:
+  zeppelin_port = str(config['configurations']['zeppelin-site']['zeppelin.server.ssl.port'])
+zeppelin_interpreter = None
+if 'zeppelin.interpreter.group.order' in config['configurations']['zeppelin-site']:
+  zeppelin_interpreter = str(config['configurations']['zeppelin-site']
+                             ['zeppelin.interpreter.group.order']).split(",")
+
+# params from zeppelin-env
+zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+zeppelin_war_tempdir = config['configurations']['zeppelin-env']['zeppelin_war_tempdir']
+zeppelin_notebook_dir = config['configurations']['zeppelin-env']['zeppelin_notebook_dir']
+local_notebook_dir = "/var/lib/zeppelin/notebook"
+
+hbase_home = config['configurations']['zeppelin-env']['hbase_home']
+hbase_conf_dir = config['configurations']['zeppelin-env']['hbase_conf_dir']
+
+zeppelin_log_file = os.path.join(zeppelin_log_dir, 'zeppelin-setup.log')
+zeppelin_hdfs_user_dir = format("/user/{zeppelin_user}")
+
+zeppelin_dir = install_dir
+conf_dir = "/etc/zeppelin/conf"
+external_dependency_conf = "/etc/zeppelin/conf/external-dependency-conf"
+
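+# store interpreter.json in HDFS when zeppelin.config.fs.dir is set and does
+# not point at the local filesystem (file:// URI)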
+conf_stored_in_hdfs = False
+if 'zeppelin.config.fs.dir' in config['configurations']['zeppelin-site'] and \
+  not config['configurations']['zeppelin-site']['zeppelin.config.fs.dir'].startswith('file://'):
+  conf_stored_in_hdfs = True
+
+# zeppelin-env.sh
+zeppelin_env_content = config['configurations']['zeppelin-env']['zeppelin_env_content']
+
+# shiro.ini
+shiro_ini_content = config['configurations']['zeppelin-shiro-ini']['shiro_ini_content']
+
+# log4j.properties
+log4j_properties_content = config['configurations']['zeppelin-log4j-properties']['log4j_properties_content']
+
+# detect configs
+master_configs = config['clusterHostInfo']
+java64_home = config['ambariLevelParams']['java_home']
+ambari_host = str(config['ambariLevelParams']['ambari_server_host'])
+zeppelin_host = str(master_configs['zeppelin_master_hosts'][0])
+
+# detect HS2 details, if installed
+
+hive_server_host = None
+hive_metastore_host = '0.0.0.0'
+hive_metastore_port = None
+hive_server_port = None
+hive_zookeeper_quorum = None
+hive_server2_support_dynamic_service_discovery = None
+is_hive_installed = False
+hive_zookeeper_namespace = None
+hive_interactive_zookeeper_namespace = None
+
+if 'hive_server_hosts' in master_configs and len(master_configs['hive_server_hosts']) != 0:
+  is_hive_installed = True
+  spark_hive_properties = {
+    'hive.metastore.uris': default('/configurations/hive-site/hive.metastore.uris', '')
+  }
+  hive_server_host = str(master_configs['hive_server_hosts'][0])
+  hive_metastore_host = str(master_configs['hive_metastore_hosts'][0])
+  hive_metastore_port = str(
+    get_port_from_url(default('/configurations/hive-site/hive.metastore.uris', '')))
+  hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+  hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
+  hive_zookeeper_namespace = config['configurations']['hive-site']['hive.server2.zookeeper.namespace']
+  hive_zookeeper_namespace = default('/configurations/hive-interactive-site/hive.server2.zookeeper.namespace', hive_zookeeper_namespace)
+  hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
+
+discovery_mode = "zooKeeper"
+hive_server_interactive_hosts = None
+if 'hive_server_interactive_hosts' in master_configs and len(master_configs['hive_server_interactive_hosts']) != 0:
+  if len(master_configs['hive_server_interactive_hosts']) > 1:
+    discovery_mode = "zooKeeperHA"
+
+  hive_server_interactive_hosts = str(master_configs['hive_server_interactive_hosts'][0])
+  hive_interactive_zookeeper_namespace = config['configurations']['hive-interactive-site']['hive.server2.zookeeper.namespace']
+  hive_server_port = str(config['configurations']['hive-site']['hive.server2.thrift.http.port'])
+  hive_zookeeper_quorum = config['configurations']['hive-site']['hive.zookeeper.quorum']
+  hive_server2_support_dynamic_service_discovery = config['configurations']['hive-site']['hive.server2.support.dynamic.service.discovery']
+
+spark_thrift_server_hosts = None
+spark_hive_thrift_port = None
+spark_hive_principal = None
+hive_principal = None
+hive_transport_mode = None
+
+if 'hive-site' in config['configurations']:
+  if 'hive.server2.authentication.kerberos.principal' in config['configurations']['hive-site']:
+    hive_principal = config['configurations']['hive-site']['hive.server2.authentication.kerberos.principal']
+  if 'hive.server2.transport.mode' in config['configurations']['hive-site']:
+    hive_transport_mode = config['configurations']['hive-site']['hive.server2.transport.mode']
+
+spark2_transport_mode = hive_transport_mode
+spark2_http_path = None
+spark2_ssl = False
+if 'spark2-hive-site-override' in config['configurations']:
+  if 'hive.server2.transport.mode' in config['configurations']['spark2-hive-site-override']:
+    spark2_transport_mode = config['configurations']['spark2-hive-site-override']['hive.server2.transport.mode']
+
+  if 'hive.server2.http.endpoint' in config['configurations']['spark2-hive-site-override']:
+    spark2_http_path = config['configurations']['spark2-hive-site-override']['hive.server2.http.endpoint']
+
+  if 'hive.server2.use.SSL' in config['configurations']['spark2-hive-site-override']:
+    spark2_ssl = default("/configurations/spark2-hive-site-override/hive.server2.use.SSL", False)
+
+if 'spark_thriftserver_hosts' in master_configs and len(master_configs['spark_thriftserver_hosts']) != 0:
+  spark_thrift_server_hosts = str(master_configs['spark_thriftserver_hosts'][0])
+  if config['configurations']['spark-hive-site-override']:
+    spark_hive_thrift_port = config['configurations']['spark-hive-site-override']['hive.server2.thrift.port']
+
+spark2_thrift_server_hosts = None
+spark2_hive_thrift_port = None
+spark2_hive_principal = None
+if 'spark2_thriftserver_hosts' in master_configs and len(master_configs['spark2_thriftserver_hosts']) != 0:
+  spark2_thrift_server_hosts = str(master_configs['spark2_thriftserver_hosts'][0])
+  if config['configurations']['spark2-hive-site-override']:
+    spark2_hive_thrift_port = config['configurations']['spark2-hive-site-override']['hive.server2.thrift.port']
+    if 'hive.server2.authentication.kerberos.principal' in config['configurations']['spark2-hive-site-override']:
+      spark2_hive_principal = config['configurations']['spark2-hive-site-override']['hive.server2.authentication.kerberos.principal']
+
+
+# detect hbase details if installed
+zookeeper_znode_parent = None
+hbase_zookeeper_quorum = None
+is_hbase_installed = False
+if 'hbase_master_hosts' in master_configs and 'hbase-site' in config['configurations']:
+  is_hbase_installed = True
+  zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+  hbase_zookeeper_quorum = config['configurations']['hbase-site']['hbase.zookeeper.quorum']
+
+# detect spark queue
+if 'spark-defaults' in config['configurations'] and 'spark.yarn.queue' in config['configurations']['spark-defaults']:
+  spark_queue = config['configurations']['spark-defaults']['spark.yarn.queue']
+elif 'spark2-defaults' in config['configurations'] and 'spark.yarn.queue' in config['configurations']['spark2-defaults']:
+  spark_queue = config['configurations']['spark2-defaults']['spark.yarn.queue']
+else:
+  spark_queue = 'default'
+
+smoke_user = config['configurations']['cluster-env']['smokeuser']
+
+if security_enabled:
+  zeppelin_kerberos_keytab = config['configurations']['zeppelin-site']['zeppelin.server.kerberos.keytab']
+  zeppelin_kerberos_principal = config['configurations']['zeppelin-site']['zeppelin.server.kerberos.principal']
+
+  smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+  smokeuser_principal =  config['configurations']['cluster-env']['smokeuser_principal_name']
+
+if 'zeppelin.interpreter.config.upgrade' in config['configurations']['zeppelin-site']:
+  zeppelin_interpreter_config_upgrade = config['configurations']['zeppelin-site']['zeppelin.interpreter.config.upgrade']
+else:
+  zeppelin_interpreter_config_upgrade = False
+
+exclude_interpreter_autoconfig = default("/configurations/zeppelin-site/exclude.interpreter.autoconfig", None)
+
+# e.g. 2.3
+stack_version_unformatted = config['clusterLevelParams']['stack_version']
+
+# e.g. 2.3.0.0
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+major_stack_version = get_major_version(stack_version_formatted)
+
+# e.g. 2.3.0.0-2130
+full_stack_version = default("/commandParams/version", None)
+
+spark_client_version = get_stack_version('spark-client')
+
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+livy_hosts = default("/clusterHostInfo/livy_server_hosts", [])
+livy2_hosts = default("/clusterHostInfo/livy2_server_hosts", [])
+
+livy_livyserver_host = None
+livy_livyserver_port = None
+livy_livyserver_protocol = 'http'
+livy2_livyserver_host = None
+livy2_livyserver_port = None
+livy2_livyserver_protocol = 'http'
+if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY, stack_version_formatted) and \
+    len(livy_hosts) > 0:
+  livy_livyserver_host = str(livy_hosts[0])
+  livy_livyserver_port = config['configurations']['livy-conf']['livy.server.port']
+  if 'livy.keystore' in config['configurations']['livy-conf']:
+    livy_livyserver_protocol = 'https'
+
+if stack_version_formatted and check_stack_feature(StackFeature.SPARK_LIVY2, stack_version_formatted) and \
+    len(livy2_hosts) > 0:
+  livy2_livyserver_host = str(livy2_hosts[0])
+  livy2_livyserver_port = config['configurations']['livy2-conf']['livy.server.port']
+  if 'livy.keystore' in config['configurations']['livy2-conf']:
+    livy2_livyserver_protocol = 'https'
+
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+dfs_type = default("/clusterLevelParams/dfs_type", "")
+
+# create partial functions with common arguments for every HdfsResource call
+# to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled=security_enabled,
+  keytab=hdfs_user_keytab,
+  kinit_path_local=kinit_path_local,
+  hadoop_bin_dir=hadoop_bin_dir,
+  hadoop_conf_dir=hadoop_conf_dir,
+  principal_name=hdfs_principal_name,
+  hdfs_site=hdfs_site,
+  default_fs=default_fs,
+  dfs_type = dfs_type
+)
+
+mount_table_xml_inclusion_file_full_path = None
+mount_table_content = None
+if 'viewfs-mount-table' in config['configurations']:
+  xml_inclusion_file_name = 'viewfs-mount-table.xml'
+  mount_table = config['configurations']['viewfs-mount-table']
+
+  if 'content' in mount_table and mount_table['content'].strip():
+    mount_table_xml_inclusion_file_full_path = os.path.join(external_dependency_conf, xml_inclusion_file_name)
+    mount_table_content = mount_table['content']
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/service_check.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/service_check.py
new file mode 100644
index 00000000..d796ae96
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/service_check.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
+class ZeppelinServiceCheck(Script):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+
+        if params.security_enabled:
+          kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
+          Execute(kinit_cmd, user=params.smoke_user)
+
+        scheme = "https" if params.ui_ssl_enabled else "http"
+        Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {scheme}://{zeppelin_host}:{zeppelin_port}/api/version | grep 200"),
+                tries=10,
+                try_sleep=3,
+                user=params.smoke_user,
+                logoutput=True)
+
+if __name__ == "__main__":
+    ZeppelinServiceCheck().execute()
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/status_params.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/status_params.py
new file mode 100644
index 00000000..35360c6f
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/scripts/status_params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script import Script
+
+config = Script.get_config()
+
+zeppelin_pid_dir = config['configurations']['zeppelin-env']['zeppelin_pid_dir']
+zeppelin_user = config['configurations']['zeppelin-env']['zeppelin_user']
+zeppelin_group = config['configurations']['zeppelin-env']['zeppelin_group']
+zeppelin_log_dir = config['configurations']['zeppelin-env']['zeppelin_log_dir']
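
status_params.py deliberately stays this small so that STATUS commands can evaluate it without pulling in the heavier params.py. A hedged sketch of how a status handler might use these values (the pid-file naming is an assumption based on Zeppelin's daemon script, not verbatim Bigtop code):

    # Hedged status-check sketch built on the four values above.
    import glob
    from resource_management.libraries.functions.check_process_status import check_process_status
    import status_params

    # Zeppelin's daemon script writes zeppelin-<user>-<host>.pid (assumed naming).
    pids = glob.glob("%s/zeppelin-%s-*.pid" % (status_params.zeppelin_pid_dir,
                                               status_params.zeppelin_user))
    check_process_status(pids[0] if pids else "zeppelin.pid")  # raises if not running
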
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/templates/input.config-zeppelin.json.j2 b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/templates/input.config-zeppelin.json.j2
new file mode 100644
index 00000000..8c545a4b
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/package/templates/input.config-zeppelin.json.j2
@@ -0,0 +1,48 @@
+{#
+ # Licensed to the Apache Software Foundation (ASF) under one
+ # or more contributor license agreements.  See the NOTICE file
+ # distributed with this work for additional information
+ # regarding copyright ownership.  The ASF licenses this file
+ # to you under the Apache License, Version 2.0 (the
+ # "License"); you may not use this file except in compliance
+ # with the License.  You may obtain a copy of the License at
+ #
+ #   http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ #}
+{
+  "input":[
+    {
+      "type":"zeppelin",
+      "rowtype":"service",
+      "path":"{{default('/configurations/zeppelin-env/zeppelin_log_dir', '/var/log/zeppelin')}}/zeppelin-{{default('/configurations/zeppelin-env/zeppelin_user', 'zeppelin')}}-*.log"
+    }
+  ],
+  "filter":[
+    {
+      "filter":"grok",
+      "conditions":{
+        "fields":{
+          "type":[
+            "zeppelin"
+          ]
+        }
+      },
+      "log4j_format":"",
+      "multiline_pattern":"^(%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{TIMESTAMP_ISO8601:logtime}\\])",
+      "message_pattern":"(?m)^%{SPACE}%{LOGLEVEL:level}%{SPACE}\\[%{TIMESTAMP_ISO8601:logtime}\\]%{SPACE}\\(\\{{"{"}}%{DATA:thread_name}\\{{"}"}}%{SPACE}%{JAVAFILE:file}\\[%{JAVAMETHOD:method}\\]:%{INT:line_number}\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}",
+      "post_map_values":{
+        "logtime":{
+          "map_date":{
+            "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS"
+          }
+        }
+      }
+    }
+  ]
+}
\ No newline at end of file
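
The multiline and message patterns target Zeppelin's default log4j layout, e.g. ` INFO [2022-08-10 13:01:01,123] ({main} ZeppelinServer.java[main]:120) - Started`. A hand-expanded plain-regex approximation of the grok macros, for experimentation only (Logsearch evaluates the grok patterns itself):

    import re

    # Approximate expansion of the grok message_pattern into plain re syntax.
    pattern = re.compile(
        r"^\s*(?P<level>[A-Z]+)\s*\[(?P<logtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3})\]"
        r"\s*\(\{(?P<thread_name>[^}]*)\}\s*(?P<file>\S+)\[(?P<method>[^\]]*)\]"
        r":(?P<line_number>\d+)\)\s*-\s*(?P<log_message>.*)", re.DOTALL)

    sample = " INFO [2022-08-10 13:01:01,123] ({main} ZeppelinServer.java[main]:120) - Started"
    match = pattern.match(sample)
    print(match.groupdict() if match else "no match")
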
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/quicklinks/quicklinks.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/quicklinks/quicklinks.json
new file mode 100644
index 00000000..9bd159c3
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/quicklinks/quicklinks.json
@@ -0,0 +1,36 @@
+{
+    "name": "default",
+    "description": "default quick links configuration",
+    "configuration": {
+      "protocol":
+      {
+        "type":"https",
+        "checks":[
+          {
+            "property":"zeppelin.ssl",
+            "desired":"true",
+            "site":"zeppelin-site"
+          }
+        ]
+      },
+  
+      "links": [
+        {
+          "name": "zeppelin_ui",
+          "label": "Zeppelin UI",
+          "requires_user_name": "false",
+          "component_name": "ZEPPELIN_MASTER",
+          "url":"%@://%@:%@/",
+          "port":{
+            "http_property": "zeppelin.server.port",
+            "http_default_port": "9995",
+            "https_property": "zeppelin.server.port",
+            "https_default_port": "9995",
+            "regex": "^(\\d+)$",
+            "site": "zeppelin-site"
+          }
+        }
+      ]
+    }
+  }
+  
\ No newline at end of file
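
Ambari substitutes the three %@ placeholders with protocol, host, and port; the protocol flips to https only when the zeppelin-site/zeppelin.ssl check above passes. Roughly (illustrative values, not Ambari's actual resolution code):

    # Illustrative quick-link resolution for the entry above.
    zeppelin_site = {"zeppelin.ssl": "true", "zeppelin.server.port": "9995"}
    scheme = "https" if zeppelin_site.get("zeppelin.ssl") == "true" else "http"
    host = "zeppelin-host.example.com"  # placeholder
    print("%s://%s:%s/" % (scheme, host, zeppelin_site["zeppelin.server.port"]))
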
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/role_command_order.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/role_command_order.json
new file mode 100644
index 00000000..1862d5e8
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/role_command_order.json
@@ -0,0 +1,8 @@
+{
+    "general_deps" : {
+      "_comment" : "dependencies for ZEPPELIN",
+      "ZEPPELIN_MASTER-START" : ["NAMENODE-START"],
+      "ZEPPELIN_SERVICE_CHECK-SERVICE_CHECK" : ["ZEPPELIN_MASTER-START"]
+    }
+  }
+  
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/service_advisor.py b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/service_advisor.py
new file mode 100644
index 00000000..cfdf8ab4
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/service_advisor.py
@@ -0,0 +1,293 @@
+#!/usr/bin/env ambari-python-wrap
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+# Python imports
+import imp
+import os
+import traceback
+import re
+import socket
+import fnmatch
+
+
+from resource_management.core.logger import Logger
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+STACKS_DIR = os.path.join(SCRIPT_DIR, '../../../../../stacks/')
+PARENT_FILE = os.path.join(STACKS_DIR, 'service_advisor.py')
+
+try:
+  if "BASE_SERVICE_ADVISOR" in os.environ:
+    PARENT_FILE = os.environ["BASE_SERVICE_ADVISOR"]
+  with open(PARENT_FILE, 'rb') as fp:
+    service_advisor = imp.load_module('service_advisor', fp, PARENT_FILE, ('.py', 'rb', imp.PY_SOURCE))
+except Exception as e:
+  traceback.print_exc()
+  print "Failed to load parent"
+
+class ZeppelinServiceAdvisor(service_advisor.ServiceAdvisor):
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(ZeppelinServiceAdvisor, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    # Always call these methods
+    self.modifyMastersWithMultipleInstances()
+    self.modifyCardinalitiesDict()
+    self.modifyHeapSizeProperties()
+    self.modifyNotValuableComponents()
+    self.modifyComponentsNotPreferableOnServer()
+    self.modifyComponentLayoutSchemes()
+
+  def modifyMastersWithMultipleInstances(self):
+    """
+    Modify the set of masters with multiple instances.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyCardinalitiesDict(self):
+    """
+    Modify the dictionary of cardinalities.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyHeapSizeProperties(self):
+    """
+    Modify the dictionary of heap size properties.
+    Must be overridden in child class.
+    """
+    pass
+
+  def modifyNotValuableComponents(self):
+    """
+    Modify the set of components whose host assignment is based on other services.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentsNotPreferableOnServer(self):
+    """
+    Modify the set of components that are not preferable on the server.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def modifyComponentLayoutSchemes(self):
+    """
+    Modify layout scheme dictionaries for components.
+    The scheme dictionary maps the number of hosts to the host index
+    where the component should exist.
+    Must be overridden in child class.
+    """
+    # Nothing to do
+    pass
+
+  def getServiceComponentLayoutValidations(self, services, hosts):
+    """
+    Get a list of errors.
+    Must be overridden in child class.
+    """
+
+    return self.getServiceComponentCardinalityValidations(services, hosts, "ZEPPELIN")
+
+  def getServiceConfigurationRecommendations(self, configurations, clusterData, services, hosts):
+    """
+    Entry point.
+    Must be overridden in child class.
+    """
+    #Logger.info("Class: %s, Method: %s. Recommending Service Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    recommender = ZeppelinRecommender()
+    recommender.recommendZeppelinConfigurationsFromHDP25(configurations, clusterData, services, hosts)
+    recommender.recommendZeppelinConfigurationsFromHDP30(configurations, clusterData, services, hosts)
+
+  def getServiceConfigurationsValidationItems(self, configurations, recommendedDefaults, services, hosts):
+    """
+    Entry point.
+    Validate configurations for the service. Return a list of errors.
+    The code for this function should be the same for each Service Advisor.
+    """
+    #Logger.info("Class: %s, Method: %s. Validating Configurations." %
+    #            (self.__class__.__name__, inspect.stack()[0][3]))
+
+    validator = ZeppelinValidator()
+    # Calls the methods of the validator using arguments,
+    # method(siteProperties, siteRecommendations, configurations, services, hosts)
+    return validator.validateListOfConfigUsingMethod(configurations, recommendedDefaults, services, hosts, validator.validators)
+
+  @staticmethod
+  def isKerberosEnabled(services, configurations):
+    """
+    Determine if Kerberos is enabled for Zeppelin.
+
+    If zeppelin-env/zeppelin.kerberos.enabled exists and is set to "true", return True;
+    otherwise return False.
+
+    The value of this property is first tested in the updated configurations
+    (configurations), then in the current configuration set (services).
+
+    :type services: dict
+    :param services: the dictionary containing the existing configuration values
+    :type configurations: dict
+    :param configurations: the dictionary containing the updated configuration values
+    :rtype: bool
+    :return: True or False
+    """
+    if configurations and "zeppelin-env" in configurations and \
+            "zeppelin.kerberos.enabled" in configurations["zeppelin-env"]["properties"]:
+      return configurations["zeppelin-env"]["properties"]["zeppelin.kerberos.enabled"].lower() == "true"
+    elif services and "zeppelin-env" in services["configurations"] and \
+            "zeppelin.kerberos.enabled" in services["configurations"]["zeppelin-env"]["properties"]:
+      return services["configurations"]["zeppelin-env"]["properties"]["zeppelin.kerberos.enabled"].lower() == "true"
+    else:
+      return False
+
+
+
+class ZeppelinRecommender(service_advisor.ServiceAdvisor):
+  """
+  Zeppelin Recommender suggests properties when adding the service for the first time or modifying configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(ZeppelinRecommender, self)
+    self.as_super.__init__(*args, **kwargs)
+
+  def recommendZeppelinConfigurationsFromHDP25(self, configurations, clusterData, services, hosts):
+    """
+    :type configurations: dict
+    :type clusterData: dict
+    :type services: dict
+    :type hosts: dict
+    """
+    self.__recommendLivySuperUsers(configurations, services)
+
+    zeppelin_shiro_ini = self.getServicesSiteProperties(services, "zeppelin-shiro-ini")
+    zeppelin_site = self.getServicesSiteProperties(services, "zeppelin-site")
+    putZeppelinShiroIniProperty = self.putProperty(configurations, "zeppelin-shiro-ini", services)
+
+    if zeppelin_shiro_ini and "shiro_ini_content" in zeppelin_shiro_ini:
+      shiro_ini_content = zeppelin_shiro_ini['shiro_ini_content']
+
+      if zeppelin_site and "zeppelin.ssl" in zeppelin_site and zeppelin_site["zeppelin.ssl"] == 'true':
+        shiro_ini_content = shiro_ini_content.replace("#cookie.secure = true", "cookie.secure = true")
+        putZeppelinShiroIniProperty('shiro_ini_content', str(shiro_ini_content))
+
+      else:
+        if not "#cookie.secure = true" in shiro_ini_content:
+          shiro_ini_content = shiro_ini_content.replace("cookie.secure = true", "#cookie.secure = true")
+          putZeppelinShiroIniProperty('shiro_ini_content', str(shiro_ini_content))
+
+
+  def recommendZeppelinConfigurationsFromHDP30(self, configurations, clusterData, services, hosts):
+    """
+    :type configurations: dict
+    :type clusterData: dict
+    :type services: dict
+    :type hosts: dict
+    """
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    if "SPARK2" in servicesList and "spark2-atlas-application-properties-override" in services["configurations"] \
+            and 'atlas.spark.enabled' in services['configurations']['spark2-atlas-application-properties-override']['properties']:
+      sac_enabled = str(services['configurations']['spark2-atlas-application-properties-override']['properties']['atlas.spark.enabled']).upper() == 'TRUE'
+      zeppelin_env_properties = self.getServicesSiteProperties(services, "zeppelin-env")
+      putZeppelinEnvProperty = self.putProperty(configurations, "zeppelin-env", services)
+      content = zeppelin_env_properties["zeppelin_env_content"]
+      content_in_lines = content.splitlines()
+      result_list = []
+      for line in content_in_lines:
+        if "ZEPPELIN_INTP_CLASSPATH_OVERRIDES" in line:
+          if sac_enabled:
+            if line.lstrip().startswith("#"):
+              line = "export ZEPPELIN_INTP_CLASSPATH_OVERRIDES=\"{{external_dependency_conf}}:/usr/hdp/current/spark-atlas-connector/*\""
+            elif "{{external_dependency_conf}}" in line:
+              line = line.replace("{{external_dependency_conf}}", "{{external_dependency_conf}}:/usr/hdp/current/spark-atlas-connector/*")
+            else:
+              k = line.rfind("\"")
+              line = line[:k] + ":/usr/hdp/current/spark-atlas-connector/*\"" + line[k+1:]
+          else:
+            if ":/usr/hdp/current/spark-atlas-connector" in line:
+              line = line.replace(":/usr/hdp/current/spark-atlas-connector/*", "")
+            elif "/usr/hdp/current/spark-atlas-connector" in line:
+              line = line.replace("/usr/hdp/current/spark-atlas-connector/*", "")
+
+        result_list.append(line)
+
+      content = "\n".join(result_list)
+      putZeppelinEnvProperty("zeppelin_env_content", content)
+
+
+
+  def __recommendLivySuperUsers(self, configurations, services):
+    """
+    If Kerberos is enabled and both Zeppelin and the Spark Livy server are installed,
+    set livy2-conf/livy.superusers to contain the short name of the Zeppelin
+    principal from zeppelin-site/zeppelin.server.kerberos.principal.
+
+    :param configurations:
+    :param services:
+    """
+    if ZeppelinServiceAdvisor.isKerberosEnabled(services, configurations):
+      zeppelin_site = self.getServicesSiteProperties(services, "zeppelin-site")
+
+      if zeppelin_site and 'zeppelin.server.kerberos.principal' in zeppelin_site:
+        zeppelin_principal = zeppelin_site['zeppelin.server.kerberos.principal']
+        zeppelin_user = zeppelin_principal.split('@')[0] if zeppelin_principal else None
+
+        if zeppelin_user:
+          self.__conditionallyUpdateSuperUsers('livy2-conf', 'livy.superusers', zeppelin_user, configurations, services)
+
+  def __conditionallyUpdateSuperUsers(self, config_name, property_name, user_to_add, configurations, services):
+    config = self.getServicesSiteProperties(services, config_name)
+
+    if config:
+      superusers = config[property_name] if property_name in config else None
+
+      # add the user to the set of users
+      if superusers:
+        _superusers = superusers.split(',')
+        _superusers = [x.strip() for x in _superusers]
+        _superusers = [x for x in _superusers if x]  # drop empty entries; stays a list on Python 3
+      else:
+        _superusers = []
+
+      if user_to_add not in _superusers:
+        _superusers.append(user_to_add)
+
+        putProperty = self.putProperty(configurations, config_name, services)
+        putProperty(property_name, ','.join(_superusers))
+
+class ZeppelinValidator(service_advisor.ServiceAdvisor):
+  """
+  Zeppelin Validator checks the correctness of properties whenever the service is first added or the user attempts to
+  change configs via the UI.
+  """
+
+  def __init__(self, *args, **kwargs):
+    self.as_super = super(ZeppelinValidator, self)
+    self.as_super.__init__(*args, **kwargs)
+
+    self.validators = []
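
Stripped of the Ambari plumbing, the livy.superusers merge in __conditionallyUpdateSuperUsers reduces to a comma-list union. A standalone illustration with hypothetical inputs:

    # Standalone sketch of the superuser merge (hypothetical values).
    def merge_superusers(existing, user_to_add):
        users = [u.strip() for u in (existing or "").split(",") if u.strip()]
        if user_to_add not in users:
            users.append(user_to_add)
        return ",".join(users)

    print(merge_superusers("livy, hive", "zeppelin"))  # -> livy,hive,zeppelin
    print(merge_superusers(None, "zeppelin"))          # -> zeppelin
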
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/themes/directories.json b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/themes/directories.json
new file mode 100644
index 00000000..95bea56e
--- /dev/null
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/bgtp-ambari-mpack/src/main/resources/stacks/BGTP/1.0/services/ZEPPELIN/themes/directories.json
@@ -0,0 +1,89 @@
+{
+    "name": "directories",
+    "description": "Directories theme for ZEPPELIN service",
+    "configuration": {
+      "layouts": [
+        {
+          "name": "directories",
+          "tabs": [
+            {
+              "name": "directories",
+              "display-name": "Directories",
+              "layout": {
+                "tab-columns": "1",
+                "tab-rows": "2",
+                "sections": [
+                  {
+                    "name": "subsection-log-dirs",
+                    "display-name": "LOG DIRS",
+                    "row-index": "1",
+                    "column-index": "0",
+                    "row-span": "1",
+                    "column-span": "1",
+                    "section-columns": "1",
+                    "section-rows": "1",
+                    "subsections": [
+                      {
+                        "name": "subsection-log-dirs",
+                        "row-index": "0",
+                        "column-index": "0",
+                        "row-span": "1",
+                        "column-span": "1"
+                      }
+                    ]
+                  },
+                  {
+                    "name": "subsection-pid-dirs",
+                    "display-name": "PID DIRS",
+                    "row-index": "2",
+                    "column-index": "0",
+                    "row-span": "1",
+                    "column-span": "1",
+                    "section-columns": "1",
+                    "section-rows": "1",
+                    "subsections": [
+                      {
+                        "name": "subsection-pid-dirs",
+                        "row-index": "0",
+                        "column-index": "0",
+                        "row-span": "1",
+                        "column-span": "1"
+                      }
+                    ]
+                  }
+                ]
+              }
+            }
+          ]
+        }
+      ],
+      "placement": {
+        "configuration-layout": "default",
+        "configs": [
+          {
+            "config": "zeppelin-env/zeppelin_log_dir",
+            "subsection-name": "subsection-log-dirs"
+          },
+          {
+            "config": "zeppelin-env/zeppelin_pid_dir",
+            "subsection-name": "subsection-pid-dirs"
+          }
+        ]
+      },
+      "widgets": [
+        {
+          "config": "zeppelin-env/zeppelin_log_dir",
+          "widget": {
+            "type": "text-field"
+          }
+        },
+        {
+          "config": "zeppelin-env/zeppelin_pid_dir",
+          "widget": {
+            "type": "text-field"
+          }
+        }
+      ]
+    }
+  }
+  
\ No newline at end of file
diff --git a/bigtop-packages/src/common/bigtop-ambari-mpack/dev-support/docker/centos7/build-containers.sh b/bigtop-packages/src/common/bigtop-ambari-mpack/dev-support/docker/centos7/build-containers.sh
index 8297a26d..486708f5 100755
--- a/bigtop-packages/src/common/bigtop-ambari-mpack/dev-support/docker/centos7/build-containers.sh
+++ b/bigtop-packages/src/common/bigtop-ambari-mpack/dev-support/docker/centos7/build-containers.sh
@@ -41,7 +41,7 @@ echo -e "\033[32mSetting up ambari-server\033[0m"
 docker exec ambari-server bash -c "ambari-server setup --java-home=/usr/lib/jvm/java --database=mysql --databasehost=localhost --databaseport=3306 --databasename=ambari --databaseusername=root --databasepassword=root -s"
 
 echo -e "\033[32mCreating container ambari-agent-01\033[0m"
-docker run -d --name ambari-agent-01 --hostname ambari-agent-01 --network ambari --privileged -e "container=docker" -v /sys/fs/cgroup:/sys/fs/cgroup:ro ambari:2.7.5 /usr/sbin/init
+docker run -d -p 9995:9995 --name ambari-agent-01 --hostname ambari-agent-01 --network ambari --privileged -e "container=docker" -v /sys/fs/cgroup:/sys/fs/cgroup:ro ambari:2.7.5 /usr/sbin/init
 docker exec ambari-agent-01 bash -c "echo '$SERVER_PUB_KEY' > /root/.ssh/authorized_keys"
 docker exec ambari-agent-01 /bin/systemctl enable sshd
 docker exec ambari-agent-01 /bin/systemctl start sshd
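
The added -p 9995:9995 mapping publishes Zeppelin's default server port from the agent container, so the notebook UI (and the service-check endpoint) becomes reachable from the Docker host once the service is up. A quick hedged smoke test from the host (assumes the unsecured HTTP default):

    # Reachability check for the published Zeppelin port; Python 2/3 compatible.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen

    print(urlopen("http://localhost:9995/api/version", timeout=5).getcode())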