Posted to commits@ambari.apache.org by jo...@apache.org on 2017/07/13 16:39:07 UTC

[01/12] ambari git commit: Merge branch 'branch-feature-AMBARI-21348' into branch-2.5

Repository: ambari
Updated Branches:
  refs/heads/branch-feature-AMBARI-21348 94eb0ddf5 -> 133baa53f


Merge branch 'branch-feature-AMBARI-21348' into branch-2.5


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/da44c5c1
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/da44c5c1
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/da44c5c1

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: da44c5c1a5476b78887cd7729d0efc5afbf0dae9
Parents: a6ac40b 267cd8b
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 14:47:40 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 14:47:40 2017 -0400

----------------------------------------------------------------------
 .../ambari/server/controller/internal/UpgradeResourceProvider.java | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------



[02/12] ambari git commit: Revert "AMBARI-21420 HBase master crashed during/post EU on an IOP4.2.5/HDP2.6 migrated cluster (dili)"

Posted by jo...@apache.org.
Revert "AMBARI-21420 HBase master crashed during/post EU on an IOP4.2.5/HDP2.6 migrated cluster (dili)"

This reverts commit cb86bf06f878efeccdb38ec87eb160eac2e6ed57.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/c2b2210b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/c2b2210b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/c2b2210b

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: c2b2210b3635e800c17621dbdbadec7761a988c1
Parents: da44c5c
Author: Di Li <di...@apache.org>
Authored: Wed Jul 12 14:50:55 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Wed Jul 12 14:50:55 2017 -0400

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/hbase_service.py    | 17 -----------------
 .../0.96.0.2.0/package/scripts/params_linux.py     |  9 ---------
 .../BigInsights/4.2.5/upgrades/config-upgrade.xml  | 11 -----------
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml     |  5 -----
 .../BigInsights/4.2/upgrades/config-upgrade.xml    | 11 -----------
 .../4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml |  7 +------
 6 files changed, 1 insertion(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
index 2e2fa10..a1003dc 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
@@ -19,7 +19,6 @@ limitations under the License.
 """
 
 from resource_management import *
-from resource_management.core.logger import Logger
 
 def hbase_service(
   name,
@@ -33,22 +32,6 @@ def hbase_service(
     pid_expression = as_sudo(["cat", pid_file])
     no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")
     
-    # delete wal log if HBase version has moved down
-    if params.to_backup_wal_dir:
-      wal_directory = params.wal_directory
-      timestamp = datetime.datetime.now()
-      format = '%Y%m%d%H%M%S'
-      wal_directory_backup = '%s_%s' % (wal_directory, timestamp.strftime(format))
-
-      rm_cmd = format("hadoop fs -mv {wal_directory} {wal_directory_backup}")
-      try:
-        Execute ( rm_cmd,
-          user = params.hbase_user
-        )
-      except Exception, e:
-        #Should still allow HBase Start/Stop to proceed
-        Logger.error("Failed to backup HBase WAL directory, command: {0} . Exception: {1}".format(rm_cmd, e.message))
-
     if action == 'start':
       daemon_cmd = format("{cmd} start {role}")
       

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 6617a80..1ee5248 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -44,7 +44,6 @@ from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.expect import expect
 from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
 from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
-from resource_management.libraries.functions.constants import Direction
 
 # server configurations
 config = Script.get_config()
@@ -441,11 +440,3 @@ if has_atlas:
   atlas_with_managed_hbase = len(zk_hosts_matches) > 0
 else:
   atlas_with_managed_hbase = False
-
-wal_directory = "/apps/hbase/data/MasterProcWALs"
-
-backup_wal_dir = default('/configurations/hbase-env/backup_wal_dir', False)
-
-#Need to make sure not to keep removing WAL logs once EU is finalized.
-upgrade_direction = default("/commandParams/upgrade_direction", None)
-to_backup_wal_dir = upgrade_direction is not None and upgrade_direction == Direction.UPGRADE and backup_wal_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index b51a744..42999b2 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -52,17 +52,6 @@
       </component>
     </service>
     
-    <service name="HBASE">
-      <component name="HBASE_MASTER">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_2_hbase_env_config" summary="Update HBase configurations">
-            <type>hbase-env</type>
-            <set key="backup_wal_dir" value="true"/>
-          </definition>
-        </changes>
-      </component>
-    </service>
-
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index f3c73a0..a96ede9 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -182,11 +182,6 @@
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>
       
-      <!-- HBASE -->
-      <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase">
-        <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
-      </execute-stage>
-
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index b46f476..f9e3e15 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -52,17 +52,6 @@
       </component>
     </service>
     
-    <service name="HBASE">
-      <component name="HBASE_MASTER">
-        <changes>
-          <definition xsi:type="configure" id="biginsights_4_2_hbase_env_config" summary="Update HBase configurations">
-            <type>hbase-env</type>
-            <set key="backup_wal_dir" value="true"/>
-          </definition>
-        </changes>
-      </component>
-    </service>
-
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/c2b2210b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 4867626..a96ede9 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -181,12 +181,7 @@
       <execute-stage service="MAPREDUCE2" component="HISTORYSERVER" title="Apply config changes for HistoryServer">
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>
-
-      <!-- HBASE -->
-      <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase">
-        <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
-      </execute-stage>
-
+      
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>


[04/12] ambari git commit: AMBARI-21420 HBase master crashed during/post EU on an IOP4.2.5/HDP2.6 migrated cluster (dili)

Posted by jo...@apache.org.
AMBARI-21420 HBase master crashed during/post EU on an IOP4.2.5/HDP2.6 migrated cluster (dili)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/1f54c6e2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/1f54c6e2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/1f54c6e2

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 1f54c6e272a5a2ad176619062b31ca18bbdf93ea
Parents: 83761d4
Author: Di Li <di...@apache.org>
Authored: Wed Jul 12 15:59:35 2017 -0400
Committer: Di Li <di...@apache.org>
Committed: Wed Jul 12 15:59:35 2017 -0400

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/hbase_service.py    | 17 +++++++++++++++++
 .../0.96.0.2.0/package/scripts/params_linux.py     |  9 +++++++++
 .../BigInsights/4.2.5/upgrades/config-upgrade.xml  | 11 +++++++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml     |  5 +++++
 .../BigInsights/4.2/upgrades/config-upgrade.xml    | 11 +++++++++++
 .../4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml |  7 ++++++-
 6 files changed, 59 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
index a1003dc..3b8e494 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
@@ -19,6 +19,7 @@ limitations under the License.
 """
 
 from resource_management import *
+from resource_management.core.logger import Logger
 
 def hbase_service(
   name,
@@ -32,6 +33,22 @@ def hbase_service(
     pid_expression = as_sudo(["cat", pid_file])
     no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1")
     
+    # delete wal log if HBase version has moved down
+    if params.to_backup_wal_dir:
+      wal_directory = params.wal_directory
+      timestamp = datetime.datetime.now()
+      timestamp_format = '%Y%m%d%H%M%S'
+      wal_directory_backup = '%s_%s' % (wal_directory, timestamp.strftime(timestamp_format))
+
+      rm_cmd = format("hadoop fs -mv {wal_directory} {wal_directory_backup}")
+      try:
+        Execute ( rm_cmd,
+          user = params.hbase_user
+        )
+      except Exception, e:
+        #Should still allow HBase Start/Stop to proceed
+        Logger.error("Failed to backup HBase WAL directory, command: {0} . Exception: {1}".format(rm_cmd, e.message))
+
     if action == 'start':
       daemon_cmd = format("{cmd} start {role}")
       

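Compared with the reverted hunk in [02/12], the strftime pattern here is bound to timestamp_format rather than format, so the later format("hadoop fs -mv ...") call still resolves to resource_management's format() helper. A standalone sketch of the shadowing pitfall the rename sidesteps (names are illustrative, not from the patch):

  import datetime

  def format(template):            # stand-in for resource_management's format() helper
      return template

  format = '%Y%m%d%H%M%S'          # a local string now shadows the helper...
  backup_suffix = datetime.datetime.now().strftime(format)
  # format("hadoop fs -mv ...")   # ...so this call would raise TypeError: 'str' object is not callable
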
http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
index 1ee5248..6617a80 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py
@@ -44,6 +44,7 @@ from resource_management.libraries.script.script import Script
 from resource_management.libraries.functions.expect import expect
 from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
 from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
+from resource_management.libraries.functions.constants import Direction
 
 # server configurations
 config = Script.get_config()
@@ -440,3 +441,11 @@ if has_atlas:
   atlas_with_managed_hbase = len(zk_hosts_matches) > 0
 else:
   atlas_with_managed_hbase = False
+
+wal_directory = "/apps/hbase/data/MasterProcWALs"
+
+backup_wal_dir = default('/configurations/hbase-env/backup_wal_dir', False)
+
+#Need to make sure not to keep removing WAL logs once EU is finalized.
+upgrade_direction = default("/commandParams/upgrade_direction", None)
+to_backup_wal_dir = upgrade_direction is not None and upgrade_direction == Direction.UPGRADE and backup_wal_dir

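For illustration, the to_backup_wal_dir gate computed above reduces to the following standalone check (values are examples; Direction.UPGRADE is assumed to be the string 'upgrade'):

  upgrade_direction = "upgrade"   # /commandParams/upgrade_direction; unset once the EU is finalized
  backup_wal_dir = True           # /configurations/hbase-env/backup_wal_dir, default False
  to_backup_wal_dir = (upgrade_direction is not None
                       and upgrade_direction == "upgrade"
                       and backup_wal_dir)
  # True only while an upgrade is in flight, which is what keeps the WAL
  # directory from being moved again after the EU is finalized.
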
http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index 42999b2..b51a744 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -52,6 +52,17 @@
       </component>
     </service>
     
+    <service name="HBASE">
+      <component name="HBASE_MASTER">
+        <changes>
+          <definition xsi:type="configure" id="biginsights_4_2_hbase_env_config" summary="Update HBase configurations">
+            <type>hbase-env</type>
+            <set key="backup_wal_dir" value="true"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index a96ede9..f3c73a0 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -182,6 +182,11 @@
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>
       
+      <!-- HBASE -->
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase">
+        <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
+      </execute-stage>
+
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index f9e3e15..b46f476 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -52,6 +52,17 @@
       </component>
     </service>
     
+    <service name="HBASE">
+      <component name="HBASE_MASTER">
+        <changes>
+          <definition xsi:type="configure" id="biginsights_4_2_hbase_env_config" summary="Update HBase configurations">
+            <type>hbase-env</type>
+            <set key="backup_wal_dir" value="true"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/1f54c6e2/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index a96ede9..4867626 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -181,7 +181,12 @@
       <execute-stage service="MAPREDUCE2" component="HISTORYSERVER" title="Apply config changes for HistoryServer">
         <task xsi:type="configure" id="biginsights_4_2_mapreduce_application_framework_patch" />
       </execute-stage>
-      
+
+      <!-- HBASE -->
+      <execute-stage service="HBASE" component="HBASE_MASTER" title="Apply config changes for HBase">
+        <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
+      </execute-stage>
+
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>


[11/12] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights - addendum: schema-validity

Posted by jo...@apache.org.
AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights - addendum: schema-validity


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a389f85b
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a389f85b
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a389f85b

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: a389f85b6e2a0cb23ff7fcf629fd55ab4e203560
Parents: a7b6d5a
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Jul 13 18:10:09 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Jul 13 18:10:31 2017 +0200

----------------------------------------------------------------------
 .../common-services/JNBG/0.2.0/configuration/jnbg-env.xml          | 1 +
 .../BigInsights/4.2/services/TITAN/configuration/titan-env.xml     | 2 ++
 .../4.2/services/TITAN/configuration/titan-hbase-solr.xml          | 1 +
 .../BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml   | 1 +
 4 files changed, 5 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a389f85b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
index f9da01e..ed49b26 100755
--- a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
@@ -75,6 +75,7 @@
     <value>/apps/jnbg/spark-warehouse</value>
     <display-name>spark.sql.warehouse.dir</display-name>
     <description>Warehouse for Notebook applications</description>
+    <on-ambari-upgrade add="false"/>
   </property>
   <property>
     <name>jkg_port</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a389f85b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
index 86e09f1..dda05e4 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
@@ -27,6 +27,7 @@
     <description>User to run Titan as</description>
     <property-type>USER</property-type>
     <value>titan</value>
+    <on-ambari-upgrade add="false"/>
   </property>
 
   <property>
@@ -41,6 +42,7 @@ export HADOOP_CONF_DIR={{hadoop_config_dir}}
 export HBASE_CONF_DIR={{hbase_config_dir}}
 CLASSPATH=$HADOOP_CONF_DIR:$HBASE_CONF_DIR:$CLASSPATH
     </value>
+    <on-ambari-upgrade add="false"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a389f85b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
index 0ca6807..2a7b366 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
@@ -61,6 +61,7 @@ index.search.solr.configset=titan
     <value-attributes>
       <show-property-name>false</show-property-name>
     </value-attributes>
+    <on-ambari-upgrade add="false"/>
   </property>
 
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/a389f85b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
index 3363d81..a5522f3 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
@@ -60,6 +60,7 @@
     <value-attributes>
       <show-property-name>false</show-property-name>
     </value-attributes>
+    <on-ambari-upgrade add="false"/>
   </property>
 
 </configuration>


[06/12] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/titanSmoke.groovy
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/titanSmoke.groovy b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/titanSmoke.groovy
new file mode 100755
index 0000000..79438be
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/titanSmoke.groovy
@@ -0,0 +1,20 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+import com.thinkaurelius.titan.core.TitanFactory;
+
+graph = TitanFactory.open("/etc/titan/conf/titan-hbase-solr.properties")
+g = graph.traversal()
+l = g.V().values('name').toList()

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params.py
new file mode 100755
index 0000000..8019748
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params.py
@@ -0,0 +1,202 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.functions.get_stack_version import get_stack_version
+
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+stack_root= Script.get_stack_root()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version_formatted = format_stack_version(stack_version_unformatted)
+full_stack_version = get_stack_version('titan-client')
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+titan_user = config['configurations']['titan-env']['titan_user']
+user_group = config['configurations']['cluster-env']['user_group']
+titan_log_dir = config['configurations']['titan-env']['titan_log_dir']
+titan_server_port = config['configurations']['titan-env']['titan_server_port']
+titan_hdfs_home_dir = config['configurations']['titan-env']['titan_hdfs_home_dir']
+titan_log_file = format("{titan_log_dir}/titan-{titan_server_port}.log")
+titan_err_file = format("{titan_log_dir}/titan-{titan_server_port}.err")
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  titan_jaas_princ = config['configurations']['titan-env']['titan_principal_name'].replace('_HOST',_hostname_lowercase)
+  titan_keytab_path = config['configurations']['titan-env']['titan_keytab_path']
+
+titan_bin_dir = format('{stack_root}/current/titan-client/bin')
+titan_data_dir = format('{stack_root}/current/titan-server/data')
+# titan configurations
+titan_conf_dir = format('{stack_root}/current/titan-server/conf')
+titan_hbase_solr_props = config['configurations']['titan-hbase-solr']['content']
+titan_env_props = config['configurations']['titan-env']['content']
+log4j_console_props = config['configurations']['titan-log4j']['content']
+
+# titan server configurations
+titan_server_conf_dir=format('{stack_root}/current/titan-server/conf/gremlin-server')
+gremlin_server_configs = config['configurations']['gremlin-server']['content']
+
+titan_server_sasl= str(config['configurations']['titan-env']['SimpleAuthenticator']).lower()
+titan_server_simple_authenticator = ""
+if titan_server_sasl == "true" and 'knox-env' not in config['configurations']:
+  titan_server_simple_authenticator = """authentication: {
+  className: org.apache.tinkerpop.gremlin.server.auth.SimpleAuthenticator,
+  config: {
+    credentialsDb: conf/tinkergraph-empty.properties,
+    credentialsDbLocation: data/credentials.kryo}}"""
+
+titan_server_ssl= str(config['configurations']['titan-env']['ssl.enabled']).lower()
+titan_server_ssl_key_cert_file = default('/configurations/titan-env/ssl.keyCertChainFile', None)
+if titan_server_ssl_key_cert_file:
+  titan_server_ssl_key_cert_file = format(", keyCertChainFile: {titan_server_ssl_key_cert_file}")
+titan_server_ssl_key_file = default('/configurations/titan-env/ssl.keyFile', None)
+if titan_server_ssl_key_file:
+  titan_server_ssl_key_file = format(", keyFile: {titan_server_ssl_key_file}")
+titan_server_ssl_key_password = default('/configurations/titan-env/ssl.keyPassword', None)
+if titan_server_ssl_key_password:
+  titan_server_ssl_key_password = format(", keyPassword: {titan_server_ssl_key_password}")
+titan_server_ssl_trust_cert_chain_file=default('/configurations/titan-env/ssl.trustCertChainFile', None)
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+hadoop_config_dir = conf_select.get_hadoop_conf_dir()
+hbase_config_dir = format('{stack_root}/current/hbase-client/conf')
+
+# Titan SparkGraphComputer configuration
+yarn_home_dir = format('{stack_root}/current/hadoop-yarn-client')
+spark_home_dir = format('{stack_root}/current/spark2-client')
+spark_config_dir = format('{stack_root}/current/spark2-client/conf')
+titan_home_dir = format('{stack_root}/current/titan-client')
+titan_conf_dir = format('{stack_root}/current/titan-client/conf')
+titan_conf_hadoop_graph_dir = format('{stack_root}/current/titan-client/conf/hadoop-graph')
+hadoop_lib_native_dir = format('{stack_root}/current/hadoop-client/lib/native')
+titan_hadoop_gryo_props = config['configurations']['hadoop-gryo']['content']
+hadoop_hbase_read_props = config['configurations']['hadoop-hbase-read']['content']
+titan_hdfs_data_dir = "/user/titan/data"
+titan_hdfs_spark_lib_dir = "/user/spark/share/lib/spark"
+titan_ext_spark_plugin_dir = format('{stack_root}/current/titan-server/ext/spark-client/plugin')
+platform_name = format('{stack_root}').split('/')[2]
+titan_spark2_archive_dir = format('/{platform_name}/apps/{full_stack_version}/spark2')
+titan_spark2_archive_file = format('spark2-{platform_name}-yarn-archive.tar.gz')
+local_components = default("/localComponents", [])
+yarn_client_installed = ( 'YARN_CLIENT' in local_components)
+hbase_master_installed = ( 'HBASE_CLIENT' in local_components)
+
+# Titan required 'storage.hostname' which is hbase cluster in IOP 4.2.
+# The host name should be zooKeeper quorum
+storage_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_port = config['configurations']['zoo.cfg']['clientPort']
+storage_host_list = []
+titan_zookeeper_solr_host_list = []
+for hostname in storage_hosts:
+  titan_zookeeper_solr_hostname = hostname+format(':{zookeeper_port}/solr')
+  titan_zookeeper_solr_host_list.append(titan_zookeeper_solr_hostname)
+  storage_host_list.append(hostname)
+storage_host = ",".join(storage_host_list)
+zookeeper_solr_for_titan_hostname  = ",".join(titan_zookeeper_solr_host_list)
+hbase_zookeeper_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+
+if 'titan_server_hosts' in config['clusterHostInfo'] and len(config['clusterHostInfo']['titan_server_hosts']) > 0:
+  titan_host = config['clusterHostInfo']['titan_server_hosts'][0]
+
+# jts jar should be copy to solr site
+titan_dir = format('{stack_root}/current/titan-client')
+titan_ext_dir = format('{stack_root}/current/titan-client/ext')
+titan_solr_conf_dir = format('{stack_root}/current/titan-client/conf/solr')
+titan_solr_jar_file = format('{stack_root}/current/titan-client/lib/jts-1.13.jar')
+# jaas file for solr when security is enabled
+titan_solr_jaas_file = format('{titan_solr_conf_dir}/titan_solr_jaas.conf')
+titan_solr_client_jaas_file = format('{titan_solr_conf_dir}/titan_solr_client_jaas.conf')
+titan_solr_client_jaas_config = "index.search.solr.jaas-file=" + format('{titan_solr_conf_dir}/titan_solr_client_jaas.conf')
+if not security_enabled:
+  titan_solr_client_jaas_config=""
+# config for solr collection creation
+index = 0
+zookeeper_quorum = ""
+for host in config['clusterHostInfo']['zookeeper_hosts']:
+  zookeeper_quorum += host + ":" + str(zookeeper_port)
+  index += 1
+  if index < len(config['clusterHostInfo']['zookeeper_hosts']):
+    zookeeper_quorum += ","
+if "solr-env" in config['configurations']:
+    solr_znode = default('/configurations/solr-env/solr_znode', '/solr')
+infra_solr_hosts = default("/clusterHostInfo/infra_solr_hosts", [])
+infra_solr_replication_factor = 2 if len(infra_solr_hosts) > 1 else 1
+titan_solr_shards = 1
+titan_solr_hdfs_dir = "/apps/titan"
+titan_solr_hdfs_conf_dir = "/apps/titan/conf"
+titan_solr_hdfs_jar = "/apps/titan/jts-1.13.jar"
+titan_tmp_dir = format('{tmp_dir}/titan')
+titan_solr_dir = format('{titan_tmp_dir}/solr_installed')
+configuration_tags = config['configurationTags']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+titan_hdfs_mode = 0775
+solr_conf_dir=format('{stack_root}/current/solr-server/conf')
+titan_solr_configset = 'titan'
+titan_solr_collection_name = 'titan'
+solr_port=config['configurations']['solr-env']['solr_port']
+solr_user= solr_user=config['configurations']['solr-env']['solr_user']
+solr_conf_trg_file = format('{stack_root}/current/solr-server/server/solr/configsets/{titan_solr_configset}/conf/solrconfig.xml')
+#for create_hdfs_directory
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path()
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_site = config['configurations']['hdfs-site']
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#to create hdfs directory we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user = hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+

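Two of the derivations in params.py above are easier to see with concrete values; a sketch (the stack root and host names are made-up examples):

  # platform_name takes the second path component of the stack root,
  # e.g. a stack root of '/usr/iop' splits to ['', 'usr', 'iop'] -> 'iop':
  stack_root = "/usr/iop"
  platform_name = stack_root.split('/')[2]          # 'iop'

  # the zookeeper_quorum loop above is equivalent to a join:
  zookeeper_port = 2181
  zookeeper_hosts = ["zk1.example.com", "zk2.example.com"]
  zookeeper_quorum = ",".join("%s:%s" % (h, zookeeper_port) for h in zookeeper_hosts)
  # 'zk1.example.com:2181,zk2.example.com:2181'
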
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params_server.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params_server.py
new file mode 100755
index 0000000..edc264f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/params_server.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+
+
+# server configurations
+config = Script.get_config()
+
+titan_pid_dir = config['configurations']['titan-env']['titan_pid_dir']
+titan_pid_file = format("{titan_pid_dir}/titan.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/service_check.py
new file mode 100755
index 0000000..118eea3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/service_check.py
@@ -0,0 +1,88 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+from resource_management import *
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import InlineTemplate, StaticFile
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.validate import call_and_match_output
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class TitanServiceCheck(Script):
+    pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class TitanServiceCheckDefault(TitanServiceCheck):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+	
+        File( format("{tmp_dir}/titanSmoke.groovy"),
+              content = StaticFile("titanSmoke.groovy"),
+              mode = 0755
+              )
+
+
+        if params.security_enabled:
+            kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+            Execute(kinit_cmd,
+                    user=params.smokeuser
+                    )
+        
+	secure=""
+        if params.titan_server_ssl == "true" :
+                secure="-k"
+                if params.titan_server_ssl_key_cert_file:
+                    secure="--cacert " + params.titan_server_ssl_key_cert_file.split(":")[1]
+        grepresult=""" | grep 99"""
+        if len(params.titan_server_simple_authenticator) > 0:
+            grepresult = ""
+        headers=""" -XPOST -Hcontent-type:application/json -d '{"gremlin":"100-1"}' """
+        http="http://"
+        if params.titan_server_ssl == "true":
+            http="https://"
+        titan_server_host = http + format("{titan_host}")
+        titan_port=format("{titan_server_port}")
+        cmd = "curl " + secure + headers + titan_server_host + ":" + titan_port + grepresult
+       
+        Execute((cmd),
+                tries     = 40,
+                try_sleep = 5,
+                path      = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+                user      = params.smokeuser,
+                logoutput = True
+                )
+
+        Execute(format("gremlin {tmp_dir}/titanSmoke.groovy"),
+                tries     = 3,
+                try_sleep = 5,
+                path      = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+                user      = params.smokeuser,
+                logoutput = True
+                )
+
+if __name__ == "__main__":
+    # print "Track service check status"
+    TitanServiceCheck().execute()

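The check above POSTs the Gremlin expression "100-1" to the server and greps the response for the expected result 99. With SSL and SimpleAuthenticator both disabled, the assembled command reduces to roughly the following sketch (host and port are made-up):

  secure = ""
  grepresult = """ | grep 99"""
  headers = """ -XPOST -Hcontent-type:application/json -d '{"gremlin":"100-1"}' """
  cmd = "curl " + secure + headers + "http://titan-host.example.com:8182" + grepresult
  # curl -XPOST -Hcontent-type:application/json -d '{"gremlin":"100-1"}' http://titan-host.example.com:8182 | grep 99
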
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan.py
new file mode 100755
index 0000000..43dcb2d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan.py
@@ -0,0 +1,143 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+from resource_management.core.source import InlineTemplate, StaticFile
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def titan(type = None, upgrade_type=None):
+    import params
+    import params_server
+    if type == 'server':
+        File(format("{params.titan_server_conf_dir}/gremlin-server.yaml"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.gremlin_server_configs)
+             )
+        credentials_file = format("{params.titan_data_dir}/credentials.kryo")
+        if not os.path.isfile(credentials_file):
+             File(credentials_file,
+                  mode=0644,
+                  group=params.user_group,
+                  owner=params.titan_user,
+                  content=""
+                  )
+        credentials_property_file = format("{params.titan_conf_dir}/tinkergraph-empty.properties")
+        if not os.path.isfile(credentials_property_file):
+             File(credentials_property_file,
+                  mode=0644,
+                  group=params.user_group,
+                  owner=params.titan_user,
+                  content=StaticFile("tinkergraph-empty.properties")
+                  )
+        Directory(params.titan_log_dir,
+                  create_parents=True,
+                  owner=params.titan_user,
+                  group=params.user_group,
+                  mode=0775
+                  )
+        Directory(params_server.titan_pid_dir,
+                  create_parents=True,
+                  owner=params.titan_user,
+                  group=params.user_group,
+                  mode=0775
+                  )
+        File(format("{params.titan_bin_dir}/gremlin-server-script.sh"),
+             mode=0755,
+             group='root',
+             owner='root',
+             content = StaticFile("gremlin-server-script.sh")
+             )
+
+    Directory(params.titan_conf_dir,
+              create_parents = True,
+              owner=params.titan_user,
+              group=params.user_group
+              )
+
+    File(format("{params.titan_conf_dir}/titan-env.sh"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.titan_env_props)
+             )
+    jaas_client_file = format('{titan_solr_client_jaas_file}')
+
+    if not os.path.isfile(jaas_client_file) and params.security_enabled:
+        File(jaas_client_file,
+             owner   = params.titan_user,
+             group   = params.user_group,
+             mode    = 0644,
+             content = Template('titan_solr_client_jaas.conf.j2')
+             )
+
+# SparkGraphComputer
+    Directory(params.titan_conf_hadoop_graph_dir,
+              create_parents = True,
+              owner=params.titan_user,
+              group=params.user_group
+              )
+
+    File(format("{params.titan_conf_hadoop_graph_dir}/hadoop-gryo.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.titan_user,
+         content=InlineTemplate(params.titan_hadoop_gryo_props)
+         )
+
+    File(format("{params.titan_conf_hadoop_graph_dir}/hadoop-hbase-read.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.titan_user,
+         content=InlineTemplate(params.hadoop_hbase_read_props)
+         )
+
+    # titan-hbase-solr_properties is always set to a default even if it's not in the payload
+    File(format("{params.titan_conf_dir}/titan-hbase-solr.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.titan_user,
+         content=InlineTemplate(params.titan_hbase_solr_props)
+         )
+
+    if (params.log4j_console_props != None):
+        File(format("{params.titan_conf_dir}/log4j-console.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.log4j_console_props)
+             )
+    elif (os.path.exists(format("{params.titan_conf_dir}/log4j-console.properties"))):
+        File(format("{params.titan_conf_dir}/log4j-console.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user
+             )
+    # Change titan ext directory for multiple user access
+    Directory(params.titan_ext_dir,
+               create_parents = True,
+               owner=params.titan_user,
+                     group=params.user_group,
+               mode=0775
+               )

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_client.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_client.py
new file mode 100755
index 0000000..9bb1aad
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_client.py
@@ -0,0 +1,61 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+import os
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+import titan
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class TitanClient(Script):
+    def get_component_name(self):
+        return "titan-client"
+
+    def configure(self, env):
+        import params
+        env.set_params(params)
+        titan.titan()
+
+    def status(self, env):
+        raise ClientComponentHasNoStatus()
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class TitanClientLinux(TitanClient):
+
+    def pre_rolling_restart(self, env):
+        import params
+        env.set_params(params)
+
+        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+            conf_select.select(params.stack_name, "titan", params.version)
+            stack_select.select("titan-client", params.version)
+
+    def install(self, env):
+        self.install_packages(env)
+        self.configure(env)
+
+if __name__ == "__main__":
+    TitanClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_server.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_server.py
new file mode 100755
index 0000000..5dcc7e9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_server.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.functions.check_process_status import check_process_status
+from titan_service import titan_service
+import titan
+
+class TitanServer(Script):
+  def get_component_name(self):
+    return "titan-server"
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def configure(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    titan.titan(type='server', upgrade_type=upgrade_type)
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    Logger.info("Executing Stack Upgrade pre-restart")
+    import params
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      stack_select.select("titan-server", params.version)
+      conf_select.select(params.stack_name, "titan", params.version)
+
+  def start(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    titan_service(action = 'start')
+
+  def stop(self, env, upgrade_type=None):
+    import params
+    env.set_params(params)
+    titan_service(action = 'stop')
+
+  def status(self, env, upgrade_type=None):
+    import params_server
+    check_process_status(params_server.titan_pid_file)
+
+if __name__ == "__main__":
+  TitanServer().execute()
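
The status() method above delegates to check_process_status(titan_pid_file). The essence of such a check is reading the pid file and probing the process; a rough sketch of the idea (the real helper raises ComponentIsNotRunning instead of returning a boolean):

    import os

    def is_process_running(pid_file):
        # No pid file: the daemon was never started or was stopped cleanly.
        if not os.path.isfile(pid_file):
            return False
        with open(pid_file) as f:
            try:
                pid = int(f.read().strip())
            except ValueError:
                return False
        try:
            os.kill(pid, 0)  # signal 0 sends nothing; it only tests existence
        except OSError:
            return False
        return True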

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_service.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_service.py
new file mode 100755
index 0000000..f958599
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/scripts/titan_service.py
@@ -0,0 +1,150 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+from resource_management.libraries.functions.validate import call_and_match_output
+from resource_management.libraries.functions import solr_cloud_util
+from resource_management.libraries.resources.xml_config import XmlConfig
+
+def titan_service(action='start'):
+  import params
+  import params_server
+  cmd = format("{titan_bin_dir}/gremlin-server-script.sh")
+  cmd_params = params_server.titan_pid_file + " " + params.titan_log_file + " " + params.titan_err_file + " " + params.titan_bin_dir + " " + params.titan_server_conf_dir + " " + params.titan_log_dir
+  if action == 'start':
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {titan_keytab_path} {titan_jaas_princ};")
+      Execute(kinit_cmd,
+              user=params.titan_user
+              )
+    XmlConfig("hbase-site.xml",
+              not_if = params.hbase_master_installed,
+              conf_dir=params.titan_conf_dir,
+              configurations=params.config['configurations']['hbase-site'],
+              configuration_attributes=params.config['configuration_attributes']['hbase-site'],
+              group=params.user_group,
+              owner=params.titan_user,
+              mode=0644
+              )
+
+    # For SparkGraphComputer: prepare /user/titan/data on HDFS and upload the Spark jars to /user/spark/share/lib/spark (used as spark.yarn.jars for Spark on YARN).
+    # create HDFS dir /user/titan/data
+    titan_create_data_dir_command = format("hadoop fs -mkdir -p {titan_hdfs_data_dir}; hadoop fs -chown -R titan:hdfs /user/titan")
+    titan_data_exist_command = format("hadoop fs -test -e {titan_hdfs_data_dir}>/dev/null 2>&1")
+    Execute(titan_create_data_dir_command,
+            not_if = titan_data_exist_command,
+            logoutput=True,user=params.hdfs_user)
+
+    #create spark plugin dir for spark jars
+    titan_create_spark_plugin_dir_command = format("mkdir -p {titan_ext_spark_plugin_dir}")
+    titan_ext_spark_plugin_dir_exist_command = format("ls {titan_ext_spark_plugin_dir}>/dev/null 2>&1")
+    Execute(titan_create_spark_plugin_dir_command,
+            not_if = titan_ext_spark_plugin_dir_exist_command,
+            logoutput=True,user=params.titan_user)
+
+    # get the Spark archive from HDFS
+    titan_get_spark_tar_command = format("hadoop fs -get {titan_spark2_archive_dir}/{titan_spark2_archive_file} {titan_ext_spark_plugin_dir}")
+    titan_sparktargz_exist_command= format("ls {titan_ext_spark_plugin_dir}/{titan_spark2_archive_file}>/dev/null 2>&1")
+    Execute(titan_get_spark_tar_command,
+            not_if = titan_sparktargz_exist_command,
+            logoutput=True,user=params.titan_user)
+
+    #extract spark targz
+    titan_x_spark_targz_command = format("tar -xzvf {titan_ext_spark_plugin_dir}/{titan_spark2_archive_file} -C {titan_ext_spark_plugin_dir}/>/dev/null 2>&1")
+    titan_sparkjars_exist_command= format("ls {titan_ext_spark_plugin_dir}/*.jar>/dev/null 2>&1")
+    Execute(titan_x_spark_targz_command,
+            not_if = titan_sparkjars_exist_command,
+            logoutput=True,user=params.titan_user)
+
+    #create hdfs dir /user/spark/share/lib/spark
+    titan_create_spark_dir_command = format("hadoop fs -mkdir -p {titan_hdfs_spark_lib_dir}")
+    titan_spark_exist_command = format("hadoop fs -test -e {titan_hdfs_spark_lib_dir}>/dev/null 2>&1")
+    Execute(titan_create_spark_dir_command,
+            not_if = titan_spark_exist_command,
+            logoutput=True,user=params.hdfs_user)
+
+    #upload spark jars to hdfs /user/spark/share/lib/spark
+    titan_put_spark_jar_command = format("hadoop fs -put -f {titan_ext_spark_plugin_dir}/* {titan_hdfs_spark_lib_dir}; hadoop fs -rm -r {titan_hdfs_spark_lib_dir}/guava*.jar; hadoop fs -put -f {titan_home_dir}/lib/guava*.jar {titan_hdfs_spark_lib_dir}")
+    titan_sparkjar_exist_command = format("hadoop fs -test -e {titan_hdfs_spark_lib_dir}/*.jar>/dev/null 2>&1")
+    Execute(titan_put_spark_jar_command,
+            not_if = titan_sparkjar_exist_command,
+            logoutput=True,user=params.hdfs_user)
+
+    # remove guava*.jar, slf4j-log4j12*.jar and spark-core*.jar to avoid classpath conflicts
+    titan_rm_conflict_jars_command = format("rm -rf {titan_ext_spark_plugin_dir}/guava*.jar; rm -rf {titan_ext_spark_plugin_dir}/slf4j-log4j12*.jar; rm -rf {titan_ext_spark_plugin_dir}/spark-core*.jar; ")
+    titan_guava_exist_command = format("ls {titan_ext_spark_plugin_dir}/guava*.jar>/dev/null 2>&1")
+    Execute(titan_rm_conflict_jars_command,
+            only_if = titan_guava_exist_command,
+            logoutput=True,user=params.titan_user)
+
+    #generate yarn-site.xml in Titan conf if no yarn-client installed
+    XmlConfig("yarn-site.xml",
+              not_if = params.yarn_client_installed,
+              conf_dir=params.titan_conf_dir,
+              configurations=params.config['configurations']['yarn-site'],
+              configuration_attributes=params.config['configuration_attributes']['yarn-site'],
+              group=params.user_group,
+              owner=params.titan_user,
+              mode=0644
+              )
+
+    #create jaas file for solr when security enabled
+    jaas_file = format('{titan_solr_jaas_file}')
+    if not os.path.isfile(jaas_file) and params.security_enabled:
+      File(jaas_file,
+           owner   = params.titan_user,
+           group   = params.user_group,
+           mode    = 0644,
+           content = Template('titan_solr_jaas.conf.j2')
+           )
+    #upload config to zookeeper
+    solr_cloud_util.upload_configuration_to_zk(
+        zookeeper_quorum = params.zookeeper_quorum,
+        solr_znode = params.solr_znode,
+        config_set = params.titan_solr_configset,
+        config_set_dir = params.titan_solr_conf_dir,
+        tmp_dir = params.tmp_dir,
+        java64_home = params.java64_home,
+        jaas_file=jaas_file,
+        retry=30, interval=5)
+
+    #create solr collection
+    solr_cloud_util.create_collection(
+        zookeeper_quorum = params.zookeeper_quorum,
+        solr_znode = params.solr_znode,
+        collection = params.titan_solr_collection_name,
+        config_set = params.titan_solr_configset,
+        java64_home = params.java64_home,
+        shards = params.titan_solr_shards,
+        replication_factor = int(params.infra_solr_replication_factor),
+        jaas_file = jaas_file)
+
+    daemon_cmd = format(cmd+" start " + cmd_params)
+    no_op_test = format("ls {params_server.titan_pid_file} >/dev/null 2>&1 && ps `cat {params_server.titan_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.titan_user
+    )
+      
+  elif action == 'stop':
+    import params_server
+    daemon_cmd = format("{titan_bin_dir}/gremlin-server-script.sh stop " + params_server.titan_pid_file)
+    Execute(daemon_cmd, user=params.titan_user)
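
Nearly every Execute above carries a not_if or only_if guard so that titan_service('start') stays idempotent across retries. A rough standalone sketch of that guard pattern, with plain subprocess standing in for the Execute resource (illustrative only, not the resource_management implementation):

    import subprocess

    def guarded_execute(cmd, not_if=None, only_if=None):
        # not_if: skip the action when the guard command succeeds (exit 0).
        if not_if is not None and subprocess.call(not_if, shell=True) == 0:
            return
        # only_if: run the action only when the guard command succeeds.
        if only_if is not None and subprocess.call(only_if, shell=True) != 0:
            return
        subprocess.check_call(cmd, shell=True)

    # e.g. create the HDFS data dir only if it does not already exist:
    # guarded_execute("hadoop fs -mkdir -p /user/titan/data",
    #                 not_if="hadoop fs -test -e /user/titan/data")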

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_client_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_client_jaas.conf.j2
new file mode 100755
index 0000000..cd4b53e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=false
+ useTicketCache=true;
+};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_jaas.conf.j2 b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_jaas.conf.j2
new file mode 100755
index 0000000..bf562f8
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/templates/titan_solr_jaas.conf.j2
@@ -0,0 +1,26 @@
+{#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#}
+
+Client {
+ com.sun.security.auth.module.Krb5LoginModule required
+ useKeyTab=true
+ storeKey=true
+ useTicketCache=false
+ keyTab="{{titan_keytab_path}}"
+ principal="{{titan_jaas_princ}}";
+};
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
index 35fc0d8..dc4811b 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/role_command_order.json
@@ -4,14 +4,22 @@
   "general_deps" : {
     "_comment" : "dependencies for all cases",
     "HIVE_SERVER_INTERACTIVE-START": ["RESOURCEMANAGER-START", "NODEMANAGER-START", "MYSQL_SERVER-START"],
-    "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP"],
+    "RESOURCEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "SPARK2_THRIFTSERVER-STOP", "KERNEL_GATEWAY-STOP" ],
     "NODEMANAGER-STOP": ["HIVE_SERVER_INTERACTIVE-STOP", "KERNEL_GATEWAY-STOP" ],
     "NAMENODE-STOP": ["HIVE_SERVER_INTERACTIVE-STOP"],
     "HIVE_SERVER_INTERACTIVE-RESTART": ["NODEMANAGER-RESTART", "MYSQL_SERVER-RESTART"],
     "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START", "HIVE_SERVER_INTERACTIVE-START"],
     "RANGER_ADMIN-START": ["ZOOKEEPER_SERVER-START", "INFRA_SOLR-START"],
     "SPARK2_SERVICE_CHECK-SERVICE_CHECK" : ["SPARK2_JOBHISTORYSERVER-START", "APP_TIMELINE_SERVER-START"],
-    "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"]
+    "HBASE_REST_SERVER-START": ["HBASE_MASTER-START"],
+    "TITAN_SERVER-START" : ["HBASE_SERVICE_CHECK-SERVICE_CHECK", "SOLR-START"],
+    "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["TITAN_SERVER-START"],
+    "KERNEL_GATEWAY-INSTALL": ["SPARK2_CLIENT-INSTALL"],
+    "PYTHON_CLIENT-INSTALL": ["KERNEL_GATEWAY-INSTALL"],
+    "KERNEL_GATEWAY-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "SPARK2_JOBHISTORYSERVER-START"],
+    "JNBG_SERVICE_CHECK-SERVICE_CHECK": ["KERNEL_GATEWAY-START"],
+    "R4ML-INSTALL": ["SPARK2_CLIENT-INSTALL", "SYSTEMML-INSTALL"],
+    "R4ML_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START", "SPARK2_JOBHISTORYSERVER-START"]
   },
   "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
   "optional_no_glusterfs": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/JNBG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/JNBG/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/JNBG/metainfo.xml
new file mode 100755
index 0000000..3520a32
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/JNBG/metainfo.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>JNBG</name>
+      <extends>common-services/JNBG/0.2.0</extends>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/R4ML/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/R4ML/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/R4ML/metainfo.xml
new file mode 100755
index 0000000..d1c708d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/R4ML/metainfo.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>R4ML</name>
+      <version>0.8.0</version>
+      <extends>common-services/R4ML/0.8.0</extends>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>r4ml_4_2_5_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SYSTEMML/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SYSTEMML/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SYSTEMML/metainfo.xml
new file mode 100755
index 0000000..7a0e125
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/SYSTEMML/metainfo.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SYSTEMML</name>
+      <version>0.13.0</version>
+      <extends>common-services/SYSTEMML/0.10.0</extends>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>*systemml_4_2_5_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/TITAN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/TITAN/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/TITAN/metainfo.xml
new file mode 100755
index 0000000..d00e707
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/TITAN/metainfo.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TITAN</name>
+      <version>1.0.0</version>
+      <extends>common-services/TITAN/1.0.0</extends>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>titan_4_2_5_*</name>
+            </package>
+            <package>
+              <name>ambari-infra-solr-client-*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
index 1caa307..8883f57 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/services/stack_advisor.py
@@ -26,7 +26,9 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
     parentRecommendConfDict = super(BigInsights425StackAdvisor, self).getServiceConfigurationRecommenderDict()
     childRecommendConfDict = {
       "HDFS": self.recommendHDFSConfigurations,
+      "JNBG": self.recommendJNBGConfigurations,
       "SOLR": self.recommendSolrConfigurations,
+      "TITAN": self.recommendTitanConfigurations,
       "RANGER": self.recommendRangerConfigurations
     }
     parentRecommendConfDict.update(childRecommendConfDict)
@@ -35,11 +37,55 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
   def getServiceConfigurationValidators(self):
     parentValidators = super(BigInsights425StackAdvisor, self).getServiceConfigurationValidators()
     childValidators = {
+      "JNBG": {"jnbg-env": self.validateJNBGConfigurations},
       "SOLR": {"ranger-solr-plugin-properties": self.validateSolrRangerPluginConfigurations}
     }
     self.mergeValidators(parentValidators, childValidators)
     return parentValidators
 
+  def recommendJNBGConfigurations(self, configurations, clusterData, services, hosts):
+    putJNBGEnvProperty = self.putProperty(configurations, "jnbg-env", services)
+    putJNBGEnvPropertyAttribute = self.putPropertyAttribute(configurations, "jnbg-env")
+   
+    distro_version = platform.linux_distribution()[1]
+    # On RHEL 6.x default path does not point to a Python 2.7
+    # so empty out the field and force user to update the path
+    if distro_version < "7.0":
+      putJNBGEnvProperty('python_interpreter_path', "")
+
+  def validateJNBGConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
+    validationItems = []
+    jnbg_env = getSiteProperties(configurations, "jnbg-env")
+    py_exec = jnbg_env.get("python_interpreter_path") if jnbg_env and "python_interpreter_path" in jnbg_env else ""
+
+    # Test that it is a valid executable path before proceeding
+    if not os.path.isfile(py_exec) or not os.access(py_exec, os.X_OK):
+      validationItems.append({"config-name": "python_interpreter_path",
+                              "item": self.getErrorItem("Invalid Python interpreter path specified")})
+      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
+
+    distro_version = platform.linux_distribution()[1]
+    if distro_version < "7.0" and (py_exec == "/opt/rh/python27/root/usr/bin/python" or py_exec == "/opt/rh/python27/root/usr/bin/python2" or py_exec == "/opt/rh/python27/root/usr/bin/python2.7"):
+      # Special handling for RHSCL Python 2.7
+      proc = Popen(['/usr/bin/scl', 'enable', 'python27', '/opt/rh/python27/root/usr/bin/python' ' -V'], stderr=PIPE)
+    else:
+      proc = Popen([py_exec, '-V'], stderr=PIPE)
+    py_string = proc.communicate()[1]
+    if "Python" not in py_string:
+      validationItems.append({"config-name": "python_interpreter_path",
+                              "item": self.getErrorItem("Path specified does not appear to be a Python interpreter")})
+      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
+
+    py_version = py_string.split()[1]
+
+    # Validate that the specified python is 2.7.x (>= 2.7 and < 3.0)
+    if py_version.split('.')[0] != '2' or int(py_version.split('.')[1]) < 7:
+      validationItems.append({"config-name": "python_interpreter_path",
+                              "item": self.getErrorItem("Specified Python interpreter must be version >= 2.7 and < 3.0")})
+      return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
+
+    return self.toConfigurationValidationProblems(validationItems, "jnbg-env")
+
   def recommendRangerConfigurations(self, configurations, clusterData, services, hosts):
     super(BigInsights425StackAdvisor, self).recommendRangerConfigurations(configurations, clusterData, services, hosts)
     putRangerAdminProperty = self.putProperty(configurations, "ranger-admin-site", services)
@@ -50,6 +96,13 @@ class BigInsights425StackAdvisor(BigInsights42StackAdvisor):
     zookeeper_host_port = ",".join(zookeeper_host_port)
     ranger_audit_zk_port = '{0}/{1}'.format(zookeeper_host_port, 'solr')
     putRangerAdminProperty('ranger.audit.solr.zookeepers', ranger_audit_zk_port)
+
+  def recommendTitanConfigurations(self, configurations, clusterData, services, hosts):
+    putTitanPropertyAttribute = self.putPropertyAttribute(configurations, "titan-env")
+    servicesList = [service["StackServices"]["service_name"] for service in services["services"]]
+    knox_enabled = "KNOX" in servicesList
+    if knox_enabled:
+      putTitanPropertyAttribute("SimpleAuthenticator", "visible", "false")
  
   def recommendSolrConfigurations(self, configurations, clusterData, services, hosts):
     super(BigInsights425StackAdvisor, self).recommendSolrConfigurations(configurations, clusterData, services, hosts)
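
The interpreter validation above relies on Python 2 printing its version banner to stderr, which is why the advisor reads communicate()[1]. Extracted as a standalone sketch (the function name is illustrative; it assumes only the standard subprocess module):

    from subprocess import PIPE, Popen

    def is_python_27(py_exec):
        proc = Popen([py_exec, "-V"], stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        banner = (err or out).decode("utf-8", "replace")  # py2 -> stderr, py3 -> stdout
        parts = banner.split()
        if "Python" not in banner or len(parts) < 2:
            return False
        major, minor = parts[1].split(".")[:2]
        return int(major) == 2 and int(minor) >= 7

    # is_python_27("/usr/bin/python2.7") -> True where Python 2.7 is installed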

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index f3c73a0..2c82cb3 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -107,7 +107,7 @@
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
         <task xsi:type="execute" hosts="master">
           <script>scripts/hbase_upgrade.py</script>
-          <function>snapshot</function>
+          <function>take_snapshot</function>
         </task>
       </execute-stage>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
index cc45213..5ee4b32 100755
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/role_command_order.json
@@ -19,7 +19,8 @@
     "ATLAS_SERVICE_CHECK-SERVICE_CHECK": ["ATLAS_SERVER-START"],
     "PHOENIX_QUERY_SERVER-START": ["HBASE_MASTER-START"],
     "OOZIE_SERVER-START": ["FALCON_SERVER-START"],
-    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"]
+    "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
+    "TITAN_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_SERVICE_CHECK-SERVICE_CHECK"]
   },
   "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
   "optional_no_glusterfs": {

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
new file mode 100755
index 0000000..b73e31e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/metainfo.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SYSTEMML</name>
+      <displayName>SystemML</displayName>
+      <comment>Apache SystemML is a distributed and declarative machine learning platform.</comment>
+      <version>0.10.0.4.2</version>
+      <components>
+        <component>
+          <name>SYSTEMML</name>
+          <displayName>SystemML</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/systemml_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+          </configFiles>          
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>apache_systemml*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
new file mode 100755
index 0000000..dd7e46c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/params.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.script.script import Script
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+systemml_home_dir = format("{stack_root}/current/systemml-client")
+systemml_lib_dir = format("{systemml_home_dir}/lib")
+systemml_scripts_dir = format("{systemml_home_dir}/scripts")
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+
+java_home = config['hostLevelParams']['java_home']

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
new file mode 100755
index 0000000..c15b907
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/service_check.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+import subprocess
+import os
+
+class SystemMLServiceCheck(Script):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+        
+        if os.path.exists(params.systemml_lib_dir):
+            cp = format("{params.stack_root}/current/hadoop-client/*:{params.stack_root}/current/hadoop-mapreduce-client/*:{params.stack_root}/current/hadoop-client/lib/*:{params.systemml_lib_dir}/systemml.jar")
+            java = format("{params.java_home}/bin/java")
+            command = [java, "-cp", cp, "org.apache.sysml.api.DMLScript", "-s", "print('Apache SystemML');"]
+            process = subprocess.Popen(command, stdout=subprocess.PIPE)
+            output = process.communicate()[0]
+            print output
+        
+            if 'Apache SystemML' not in output:
+                raise Fail("Expected output Apache SystemML not found.")
+
+if __name__ == "__main__":
+    SystemMLServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
new file mode 100755
index 0000000..2d45b68
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/SYSTEMML/package/scripts/systemml_client.py
@@ -0,0 +1,51 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+#from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+
+
+class SystemMLClient(Script):
+
+  def get_component_name(self):
+    return "systemml-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      #conf_select.select(params.stack_name, "systemml", params.version)
+      stack_select.select("systemml-client", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  SystemMLClient().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
new file mode 100755
index 0000000..86e09f1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-env.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_do_not_extend="true">
+
+  <property>
+    <name>titan_user</name>
+    <description>User to run Titan as</description>
+    <property-type>USER</property-type>
+    <value>titan</value>
+  </property>
+
+  <property>
+    <name>content</name>
+    <description>This is the template for titan-env.sh file</description>
+    <value>
+# Set JAVA HOME
+export JAVA_HOME={{java64_home}}
+
+# Add hadoop and hbase configuration directories into classpath
+export HADOOP_CONF_DIR={{hadoop_config_dir}}
+export HBASE_CONF_DIR={{hbase_config_dir}}
+CLASSPATH=$HADOOP_CONF_DIR:$HBASE_CONF_DIR:$CLASSPATH
+    </value>
+  </property>
+
+</configuration>
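
The content block above is a Jinja2 template: placeholders such as {{java64_home}} are filled from the params module when Ambari renders titan-env.sh. Plain Jinja2 demonstrates the substitution; the paths below are made-up example values:

    from jinja2 import Template

    content = (
        "export JAVA_HOME={{java64_home}}\n"
        "export HADOOP_CONF_DIR={{hadoop_config_dir}}\n"
        "export HBASE_CONF_DIR={{hbase_config_dir}}\n"
    )
    print(Template(content).render(java64_home="/usr/jdk64/jdk8",
                                   hadoop_config_dir="/etc/hadoop/conf",
                                   hbase_config_dir="/etc/hbase/conf"))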

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
new file mode 100755
index 0000000..0ca6807
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-hbase-solr.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true" supports_do_not_extend="true">
+
+  <property>
+    <name>content</name>
+    <description>Describe the configurations for Solr</description>
+    <value># Titan configuration sample: HBase and Solr
+# ATTENTION: to use these settings, manually run titan-solr-connection.sh before building the index.
+
+# This file connects to HBase using a Zookeeper quorum
+# (storage.hostname) consisting solely of localhost. It also
+# connects to Solr running on localhost using Solr's HTTP API.
+# Zookeeper, the HBase services, and Solr must already be
+# running and available before starting Titan with this file.
+storage.backend=hbase
+storage.hostname={{storage_host}}
+storage.hbase.table=titan_solr
+storage.hbase.ext.zookeeper.znode.parent={{hbase_zookeeper_parent}}
+
+cache.db-cache = true
+cache.db-cache-clean-wait = 20
+cache.db-cache-time = 180000
+cache.db-cache-size = 0.5
+
+# The indexing backend used to extend and optimize Titan's query
+# functionality. This setting is optional. Titan can use multiple
+# heterogeneous index backends. Hence, this option can appear more than
+# once, so long as the user-defined name between "index" and "backend" is
+# unique among appearances. Similar to the storage backend, this should be
+# set to one of Titan's built-in shorthand names for its standard index
+# backends (shorthands: lucene, elasticsearch, es, solr) or to the full
+# package and classname of a custom/third-party IndexProvider
+# implementation.
+
+index.search.backend=solr
+index.search.solr.mode=cloud
+index.search.solr.zookeeper-url={{solr_server_host}}/solr
+index.search.solr.configset=titan
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
new file mode 100755
index 0000000..3363d81
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/configuration/titan-log4j.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_do_not_extend="true">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j-console.properties</description>
+    <value>
+      # Used by gremlin.sh
+
+      log4j.appender.A2=org.apache.log4j.ConsoleAppender
+      log4j.appender.A2.Threshold=TRACE
+      log4j.appender.A2.layout=org.apache.log4j.PatternLayout
+      log4j.appender.A2.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c %x - %m%n
+
+      log4j.rootLogger=${gremlin.log4j.level}, A2
+
+      #log4j.logger.com.thinkaurelius.titan.graphdb.database.idassigner.placement=DEBUG
+      #log4j.logger.com.thinkaurelius.titan.diskstorage.hbase.HBaseStoreManager=DEBUG
+
+      # Disable spurious Hadoop config deprecation warnings under 2.2.0.
+      #
+      # See https://issues.apache.org/jira/browse/HADOOP-10178
+      #
+      # This can and should be deleted when we upgrade our Hadoop 2.2.0
+      # dependency to 2.3.0 or 3.0.0.
+      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=OFF
+
+      # Configure MR at its own loglevel. We usually want MR at INFO,
+      # even if the rest of the loggers are at WARN or ERROR or FATAL,
+      # because job progress information is at INFO.
+      log4j.logger.org.apache.hadoop.mapred=${gremlin.mr.log4j.level}
+      log4j.logger.org.apache.hadoop.mapreduce=${gremlin.mr.log4j.level}
+
+      # This generates 3 INFO lines per jar on the classpath -- usually more
+      # noise than desirable in the REPL. Switching it to the default
+      # log4j level means it will be at WARN by default, which is ideal.
+      log4j.logger.org.apache.hadoop.mapred.LocalDistributedCacheManager=${gremlin.log4j.level}
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
new file mode 100755
index 0000000..ccabbf0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/kerberos.json
@@ -0,0 +1,17 @@
+{
+  "services": [
+    {
+      "name": "TITAN",
+      "components": [
+        {
+          "name": "TITAN",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
new file mode 100755
index 0000000..73f4635
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/metainfo.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TITAN</name>
+      <displayName>Titan</displayName>
+      <comment>Titan is a scalable graph database optimized for storing and querying graphs containing hundreds of
+        billions of vertices and edges distributed across a multi-machine cluster.</comment>
+      <version>1.0.0</version>
+      <components>
+        <component>
+          <name>TITAN</name>
+          <displayName>Titan</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/titan_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+               <type>env</type>
+               <fileName>titan-env.sh</fileName>
+               <dictionaryName>titan-env</dictionaryName>
+            </configFile>
+            <configFile>
+                <type>env</type>
+                <fileName>log4j-console.properties</fileName>
+                <dictionaryName>titan-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+                <type>env</type>
+                <fileName>titan-hbase-solr.properties</fileName>
+                <dictionaryName>titan-hbase-solr</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>titan_4_2_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+        <service>HBASE</service>
+        <service>SOLR</service>
+      </requiredServices>
+      
+      <configuration-dependencies>
+        <config-type>titan-env</config-type>
+        <config-type>titan-hbase-solr</config-type>
+        <config-type>titan-log4j</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
new file mode 100755
index 0000000..79438be
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/files/titanSmoke.groovy
@@ -0,0 +1,20 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+import com.thinkaurelius.titan.core.TitanFactory;
+
+graph = TitanFactory.open("/etc/titan/conf/titan-hbase-solr.properties")
+g = graph.traversal()
+l = g.V().values('name').toList()


[12/12] ambari git commit: Merge branch 'branch-2.5' into branch-feature-AMBARI-21348

Posted by jo...@apache.org.
Merge branch 'branch-2.5' into branch-feature-AMBARI-21348


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/133baa53
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/133baa53
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/133baa53

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 133baa53fac0e6a6bb372e4183172236920fd54c
Parents: 94eb0dd a389f85
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Thu Jul 13 12:38:57 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Thu Jul 13 12:38:57 2017 -0400

----------------------------------------------------------------------
 .../libraries/functions/stack_tools.py          |  13 ++
 .../upgrades/ChangeStackReferencesAction.java   |   4 +-
 .../0.96.0.2.0/package/scripts/hbase_master.py  |  10 +-
 .../0.96.0.2.0/package/scripts/hbase_service.py |  41 ++--
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../common-services/JNBG/0.2.0/alerts.json      |  32 +++
 .../JNBG/0.2.0/configuration/jnbg-env.xml       | 209 +++++++++++++++++++
 .../common-services/JNBG/0.2.0/kerberos.json    |  59 ++++++
 .../common-services/JNBG/0.2.0/metainfo.xml     | 108 ++++++++++
 .../JNBG/0.2.0/package/files/jkg_install.sh     | 169 +++++++++++++++
 .../JNBG/0.2.0/package/files/jkg_start.sh       |  84 ++++++++
 .../JNBG/0.2.0/package/files/log4j_setup.sh     |  79 +++++++
 .../0.2.0/package/files/pyspark_configure.sh    | 104 +++++++++
 .../JNBG/0.2.0/package/files/pythonenv_setup.sh | 138 ++++++++++++
 .../JNBG/0.2.0/package/files/toree_configure.sh | 151 ++++++++++++++
 .../JNBG/0.2.0/package/files/toree_install.sh   | 176 ++++++++++++++++
 .../JNBG/0.2.0/package/scripts/jkg_toree.py     | 134 ++++++++++++
 .../0.2.0/package/scripts/jkg_toree_params.py   | 177 ++++++++++++++++
 .../JNBG/0.2.0/package/scripts/jnbg_helpers.py  |  81 +++++++
 .../JNBG/0.2.0/package/scripts/jnbg_params.py   |  66 ++++++
 .../JNBG/0.2.0/package/scripts/py_client.py     |  63 ++++++
 .../0.2.0/package/scripts/py_client_params.py   |  39 ++++
 .../JNBG/0.2.0/package/scripts/service_check.py |  44 ++++
 .../JNBG/0.2.0/package/scripts/status_params.py |  26 +++
 .../0.5.0.2.2/package/scripts/params_linux.py   |   8 +
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py   |   2 +-
 .../R4ML/0.8.0/configuration/r4ml-env.xml       |  48 +++++
 .../common-services/R4ML/0.8.0/metainfo.xml     |  92 ++++++++
 .../R4ML/0.8.0/package/files/Install.R          |  25 +++
 .../R4ML/0.8.0/package/files/ServiceCheck.R     |  28 +++
 .../R4ML/0.8.0/package/files/localr.repo        |  22 ++
 .../R4ML/0.8.0/package/scripts/__init__.py      |  19 ++
 .../R4ML/0.8.0/package/scripts/params.py        |  80 +++++++
 .../R4ML/0.8.0/package/scripts/r4ml_client.py   | 201 ++++++++++++++++++
 .../R4ML/0.8.0/package/scripts/service_check.py |  45 ++++
 .../SYSTEMML/0.10.0/metainfo.xml                |  77 +++++++
 .../SYSTEMML/0.10.0/package/scripts/__init__.py |  19 ++
 .../SYSTEMML/0.10.0/package/scripts/params.py   |  40 ++++
 .../0.10.0/package/scripts/service_check.py     |  43 ++++
 .../0.10.0/package/scripts/systemml_client.py   |  49 +++++
 .../common-services/TITAN/1.0.0/alerts.json     |  33 +++
 .../1.0.0/configuration/gremlin-server.xml      |  85 ++++++++
 .../TITAN/1.0.0/configuration/hadoop-gryo.xml   |  94 +++++++++
 .../1.0.0/configuration/hadoop-hbase-read.xml   | 102 +++++++++
 .../TITAN/1.0.0/configuration/titan-env.xml     | 157 ++++++++++++++
 .../1.0.0/configuration/titan-hbase-solr.xml    |  69 ++++++
 .../TITAN/1.0.0/configuration/titan-log4j.xml   |  65 ++++++
 .../common-services/TITAN/1.0.0/kerberos.json   |  52 +++++
 .../common-services/TITAN/1.0.0/metainfo.xml    | 124 +++++++++++
 .../package/alerts/alert_check_titan_server.py  |  65 ++++++
 .../package/files/gremlin-server-script.sh      |  86 ++++++++
 .../package/files/tinkergraph-empty.properties  |  18 ++
 .../TITAN/1.0.0/package/files/titanSmoke.groovy |  20 ++
 .../TITAN/1.0.0/package/scripts/params.py       | 202 ++++++++++++++++++
 .../1.0.0/package/scripts/params_server.py      |  37 ++++
 .../1.0.0/package/scripts/service_check.py      |  88 ++++++++
 .../TITAN/1.0.0/package/scripts/titan.py        | 143 +++++++++++++
 .../TITAN/1.0.0/package/scripts/titan_client.py |  61 ++++++
 .../TITAN/1.0.0/package/scripts/titan_server.py |  67 ++++++
 .../1.0.0/package/scripts/titan_service.py      | 150 +++++++++++++
 .../templates/titan_solr_client_jaas.conf.j2    |  23 ++
 .../package/templates/titan_solr_jaas.conf.j2   |  26 +++
 .../BigInsights/4.2.5/role_command_order.json   |  12 +-
 .../4.2.5/services/JNBG/metainfo.xml            |  26 +++
 .../4.2.5/services/R4ML/metainfo.xml            |  37 ++++
 .../4.2.5/services/SYSTEMML/metainfo.xml        |  37 ++++
 .../4.2.5/services/TITAN/metainfo.xml           |  40 ++++
 .../BigInsights/4.2.5/services/stack_advisor.py |  53 +++++
 .../4.2.5/upgrades/config-upgrade.xml           |  68 ++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 166 ++++++++++++++-
 .../BigInsights/4.2/role_command_order.json     |   3 +-
 .../4.2/services/SYSTEMML/metainfo.xml          |  77 +++++++
 .../SYSTEMML/package/scripts/__init__.py        |  19 ++
 .../services/SYSTEMML/package/scripts/params.py |  40 ++++
 .../SYSTEMML/package/scripts/service_check.py   |  43 ++++
 .../SYSTEMML/package/scripts/systemml_client.py |  49 +++++
 .../services/TITAN/configuration/titan-env.xml  |  48 +++++
 .../TITAN/configuration/titan-hbase-solr.xml    |  67 ++++++
 .../TITAN/configuration/titan-log4j.xml         |  66 ++++++
 .../4.2/services/TITAN/kerberos.json            |  17 ++
 .../BigInsights/4.2/services/TITAN/metainfo.xml |  88 ++++++++
 .../TITAN/package/files/titanSmoke.groovy       |  20 ++
 .../services/TITAN/package/scripts/params.py    | 128 ++++++++++++
 .../TITAN/package/scripts/service_check.py      |  64 ++++++
 .../4.2/services/TITAN/package/scripts/titan.py |  70 +++++++
 .../TITAN/package/scripts/titan_client.py       |  58 +++++
 .../BigInsights/4.2/upgrades/config-upgrade.xml |  94 +++++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 192 ++++++++++++++++-
 .../services/HIVE/configuration/hive-env.xml    |  78 +++----
 .../HIVE/configuration/hive-interactive-env.xml |  62 +++---
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 ++++++++++++++
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 .../ChangeStackReferencesActionTest.java        |   1 +
 94 files changed, 6360 insertions(+), 95 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/133baa53/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/ambari/blob/133baa53/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------


[08/12] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)

Posted by jo...@apache.org.
AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/69e492f2
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/69e492f2
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/69e492f2

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 69e492f288340e797cce62bfd42e677bec958158
Parents: 1f54c6e
Author: Alejandro Fernandez <af...@hortonworks.com>
Authored: Wed Jul 12 15:14:30 2017 -0700
Committer: Alejandro Fernandez <af...@hortonworks.com>
Committed: Wed Jul 12 16:17:07 2017 -0700

----------------------------------------------------------------------
 .../0.96.0.2.0/package/scripts/hbase_master.py  |  10 +-
 .../0.96.0.2.0/package/scripts/hbase_service.py |  37 ++--
 .../common-services/JNBG/0.2.0/alerts.json      |  32 +++
 .../JNBG/0.2.0/configuration/jnbg-env.xml       | 208 +++++++++++++++++++
 .../common-services/JNBG/0.2.0/kerberos.json    |  59 ++++++
 .../common-services/JNBG/0.2.0/metainfo.xml     | 108 ++++++++++
 .../JNBG/0.2.0/package/files/jkg_install.sh     | 169 +++++++++++++++
 .../JNBG/0.2.0/package/files/jkg_start.sh       |  84 ++++++++
 .../JNBG/0.2.0/package/files/log4j_setup.sh     |  79 +++++++
 .../0.2.0/package/files/pyspark_configure.sh    | 104 ++++++++++
 .../JNBG/0.2.0/package/files/pythonenv_setup.sh | 138 ++++++++++++
 .../JNBG/0.2.0/package/files/toree_configure.sh | 151 ++++++++++++++
 .../JNBG/0.2.0/package/files/toree_install.sh   | 176 ++++++++++++++++
 .../JNBG/0.2.0/package/scripts/jkg_toree.py     | 134 ++++++++++++
 .../0.2.0/package/scripts/jkg_toree_params.py   | 177 ++++++++++++++++
 .../JNBG/0.2.0/package/scripts/jnbg_helpers.py  |  81 ++++++++
 .../JNBG/0.2.0/package/scripts/jnbg_params.py   |  66 ++++++
 .../JNBG/0.2.0/package/scripts/py_client.py     |  63 ++++++
 .../0.2.0/package/scripts/py_client_params.py   |  39 ++++
 .../JNBG/0.2.0/package/scripts/service_check.py |  44 ++++
 .../JNBG/0.2.0/package/scripts/status_params.py |  26 +++
 .../R4ML/0.8.0/configuration/r4ml-env.xml       |  48 +++++
 .../common-services/R4ML/0.8.0/metainfo.xml     |  92 ++++++++
 .../R4ML/0.8.0/package/files/Install.R          |  25 +++
 .../R4ML/0.8.0/package/files/ServiceCheck.R     |  28 +++
 .../R4ML/0.8.0/package/files/localr.repo        |  22 ++
 .../R4ML/0.8.0/package/scripts/__init__.py      |  19 ++
 .../R4ML/0.8.0/package/scripts/params.py        |  80 +++++++
 .../R4ML/0.8.0/package/scripts/r4ml_client.py   | 201 ++++++++++++++++++
 .../R4ML/0.8.0/package/scripts/service_check.py |  45 ++++
 .../SYSTEMML/0.10.0/metainfo.xml                |  77 +++++++
 .../SYSTEMML/0.10.0/package/scripts/__init__.py |  19 ++
 .../SYSTEMML/0.10.0/package/scripts/params.py   |  40 ++++
 .../0.10.0/package/scripts/service_check.py     |  43 ++++
 .../0.10.0/package/scripts/systemml_client.py   |  49 +++++
 .../common-services/TITAN/1.0.0/alerts.json     |  33 +++
 .../1.0.0/configuration/gremlin-server.xml      |  85 ++++++++
 .../TITAN/1.0.0/configuration/hadoop-gryo.xml   |  94 +++++++++
 .../1.0.0/configuration/hadoop-hbase-read.xml   | 102 +++++++++
 .../TITAN/1.0.0/configuration/titan-env.xml     | 157 ++++++++++++++
 .../1.0.0/configuration/titan-hbase-solr.xml    |  69 ++++++
 .../TITAN/1.0.0/configuration/titan-log4j.xml   |  65 ++++++
 .../common-services/TITAN/1.0.0/kerberos.json   |  52 +++++
 .../common-services/TITAN/1.0.0/metainfo.xml    | 124 +++++++++++
 .../package/alerts/alert_check_titan_server.py  |  65 ++++++
 .../package/files/gremlin-server-script.sh      |  86 ++++++++
 .../package/files/tinkergraph-empty.properties  |  18 ++
 .../TITAN/1.0.0/package/files/titanSmoke.groovy |  20 ++
 .../TITAN/1.0.0/package/scripts/params.py       | 202 ++++++++++++++++++
 .../1.0.0/package/scripts/params_server.py      |  37 ++++
 .../1.0.0/package/scripts/service_check.py      |  88 ++++++++
 .../TITAN/1.0.0/package/scripts/titan.py        | 143 +++++++++++++
 .../TITAN/1.0.0/package/scripts/titan_client.py |  61 ++++++
 .../TITAN/1.0.0/package/scripts/titan_server.py |  67 ++++++
 .../1.0.0/package/scripts/titan_service.py      | 150 +++++++++++++
 .../templates/titan_solr_client_jaas.conf.j2    |  23 ++
 .../package/templates/titan_solr_jaas.conf.j2   |  26 +++
 .../BigInsights/4.2.5/role_command_order.json   |  12 +-
 .../4.2.5/services/JNBG/metainfo.xml            |  26 +++
 .../4.2.5/services/R4ML/metainfo.xml            |  37 ++++
 .../4.2.5/services/SYSTEMML/metainfo.xml        |  37 ++++
 .../4.2.5/services/TITAN/metainfo.xml           |  40 ++++
 .../BigInsights/4.2.5/services/stack_advisor.py |  53 +++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |   2 +-
 .../BigInsights/4.2/role_command_order.json     |   3 +-
 .../4.2/services/SYSTEMML/metainfo.xml          |  77 +++++++
 .../SYSTEMML/package/scripts/__init__.py        |  19 ++
 .../services/SYSTEMML/package/scripts/params.py |  40 ++++
 .../SYSTEMML/package/scripts/service_check.py   |  43 ++++
 .../SYSTEMML/package/scripts/systemml_client.py |  49 +++++
 .../services/TITAN/configuration/titan-env.xml  |  46 ++++
 .../TITAN/configuration/titan-hbase-solr.xml    |  66 ++++++
 .../TITAN/configuration/titan-log4j.xml         |  65 ++++++
 .../4.2/services/TITAN/kerberos.json            |  17 ++
 .../BigInsights/4.2/services/TITAN/metainfo.xml |  88 ++++++++
 .../TITAN/package/files/titanSmoke.groovy       |  20 ++
 .../services/TITAN/package/scripts/params.py    | 128 ++++++++++++
 .../TITAN/package/scripts/service_check.py      |  64 ++++++
 .../4.2/services/TITAN/package/scripts/titan.py |  70 +++++++
 .../TITAN/package/scripts/titan_client.py       |  58 ++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  |   2 +-
 81 files changed, 5583 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
index 30674a8..8151572 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_master.py
@@ -19,10 +19,14 @@ limitations under the License.
 """
 
 import sys
-from resource_management import *
+
+from resource_management.libraries.script.script import Script
+from resource_management.core.resources.service import Service
 from resource_management.libraries.functions.security_commons import build_expectations, \
   cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
   FILE_TYPE_XML
+from resource_management.libraries.functions.check_process_status import check_process_status
+from resource_management.libraries.functions.format import format
 from hbase import hbase
 from hbase_service import hbase_service
 from hbase_decommission import hbase_decommission
@@ -31,6 +35,8 @@ from setup_ranger_hbase import setup_ranger_hbase
 from ambari_commons import OSCheck, OSConst
 from ambari_commons.os_family_impl import OsFamilyImpl
 
+if OSCheck.is_windows_family():
+  from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
 
 class HbaseMaster(Script):
   def configure(self, env):
@@ -83,7 +89,7 @@ class HbaseMasterDefault(HbaseMaster):
     env.set_params(params)
     self.configure(env) # for security
     setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-master")
-    hbase_service('master', action = 'start')
+    hbase_service('master', action='start')
     
   def stop(self, env, upgrade_type=None):
     import params

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
index 3b8e494..1d618ed 100644
--- a/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
+++ b/ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_service.py
@@ -17,14 +17,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 
 """
+from datetime import datetime
 
-from resource_management import *
+from resource_management.core.resources.system import Execute
+from resource_management.core.resources.system import File
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.shell import as_sudo
+from resource_management.libraries.functions.show_logs import show_logs
 from resource_management.core.logger import Logger
 
-def hbase_service(
-  name,
-  action = 'start'): # 'start' or 'stop' or 'status'
-    
+def hbase_service(name, action='start'):
     import params
   
     role = name
@@ -36,18 +39,28 @@ def hbase_service(
     # delete wal log if HBase version has moved down
     if params.to_backup_wal_dir:
       wal_directory = params.wal_directory
-      timestamp = datetime.datetime.now()
+      timestamp = datetime.now()
       timestamp_format = '%Y%m%d%H%M%S'
       wal_directory_backup = '%s_%s' % (wal_directory, timestamp.strftime(timestamp_format))
 
-      rm_cmd = format("hadoop fs -mv {wal_directory} {wal_directory_backup}")
+      check_if_wal_dir_exists = format("hdfs dfs -ls {wal_directory}")
+      wal_dir_exists = False
       try:
-        Execute ( rm_cmd,
-          user = params.hbase_user
-        )
+        Execute(check_if_wal_dir_exists,
+                user=params.hbase_user
+                )
+        wal_dir_exists = True
       except Exception, e:
-        #Should still allow HBase Start/Stop to proceed
-        Logger.error("Failed to backup HBase WAL directory, command: {0} . Exception: {1}".format(rm_cmd, e.message))
+        Logger.error(format("Did not find HBase WAL directory {wal_directory}. It's possible that it was already moved. Exception: {e.message}"))
+
+      if wal_dir_exists:
+        move_wal_dir_cmd = format("hdfs dfs -mv {wal_directory} {wal_directory_backup}")
+        try:
+          Execute(move_wal_dir_cmd,
+            user=params.hbase_user
+          )
+        except Exception, e:
+          Logger.error(format("Failed to backup HBase WAL directory, command: {move_wal_dir_cmd} . Exception: {e.message}"))
 
     if action == 'start':
       daemon_cmd = format("{cmd} start {role}")
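
The change above replaces the blind "hadoop fs -mv" with a check-then-move. Stripped of the Ambari wrappers, the pattern reduces to the following standalone sketch (paths and user are illustrative):

import subprocess
from datetime import datetime

def backup_wal_dir(wal_directory, hbase_user="hbase"):
  # Only attempt the move if the directory actually exists in HDFS;
  # "hdfs dfs -ls" returns non-zero when it does not.
  ls_rc = subprocess.call(["sudo", "-u", hbase_user, "hdfs", "dfs", "-ls", wal_directory])
  if ls_rc != 0:
    return  # nothing to back up; it may already have been moved
  backup = "%s_%s" % (wal_directory, datetime.now().strftime("%Y%m%d%H%M%S"))
  subprocess.call(["sudo", "-u", hbase_user, "hdfs", "dfs", "-mv", wal_directory, backup])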

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/alerts.json b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/alerts.json
new file mode 100755
index 0000000..963c687
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/alerts.json
@@ -0,0 +1,32 @@
+{
+  "JNBG": {
+    "service": [],
+    "KERNEL_GATEWAY": [
+      {
+        "name": "jupyter_kernel_gateway",
+        "label": "Jupyter Kernel Gateway Process",
+        "description": "This host-level alert is triggered if the Jupyter Kernel Gateway cannot be determined to be up.",
+        "interval": 1,
+        "scope": "HOST",
+        "source": {
+          "type": "PORT",
+          "uri": "{{jnbg-env/jnbg_port}}",
+          "default_port": 8888,
+          "reporting": {
+            "ok": {
+              "text": "TCP OK - {0:.3f}s response on port {1}"
+            },
+            "warning": {
+              "text": "TCP OK - {0:.3f}s response on port {1}",
+              "value": 1.5
+            },
+            "critical": {
+              "text": "Connection failed: {0} to {1}:{2}",
+              "value": 5.0
+            }
+          }
+        }
+      }
+    ]
+  }
+}
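
The PORT alert above boils down to a timed TCP connect. A standalone sketch of the same OK/WARNING/CRITICAL logic, with the 1.5s/5.0s thresholds copied from the JSON (host name is illustrative):

import socket
import time

def check_jkg_port(host, port=8888, warn=1.5, crit=5.0):
  start = time.time()
  try:
    sock = socket.create_connection((host, port), timeout=crit)
    sock.close()
  except Exception as e:
    return "CRITICAL", "Connection failed: %s to %s:%s" % (e, host, port)
  elapsed = time.time() - start
  state = "WARNING" if elapsed >= warn else "OK"
  return state, "TCP OK - %.3fs response on port %s" % (elapsed, port)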

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
new file mode 100755
index 0000000..f9da01e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/configuration/jnbg-env.xml
@@ -0,0 +1,208 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="false">
+  <property>
+    <name>kernel_spark_opts</name>
+    <value>--master=yarn --deploy-mode=client --driver-java-options="-Dlog4j.logFile=/var/log/jnbg/spark-driver-USER.log -Dlog4j.configuration=file:/var/lib/jnbg/conf/log4j.properties"</value>
+    <display-name>spark_opts</display-name>
+    <description>
+      SPARK_OPTS used for all kernels (ToreeInstall.spark_opts, PYSPARK_SUBMIT_ARGS).
+      Optionally include -Dlog4j.logLevel and -Dlog4j.fileSize in --driver-java-options
+      to influence logging behavior. Default: -Dlog4j.logLevel=INFO -Dlog4j.fileSize=10MB
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>toree_opts</name>
+    <value></value>
+    <display-name>ToreeInstall.toree_opts</display-name>
+    <description>__TOREE_OPTS__ for Apache Toree kernel</description>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>notebook_user</name>
+    <value>notebook</value>
+    <display-name>Notebook service user</display-name>
+    <description>User to run JKG and kernel processes</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>notebook_group</name>
+    <value>notebook</value>
+    <display-name>Notebook service user group</display-name>
+    <description>Service user's group</description>
+    <value-attributes>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>spark_home</name>
+    <value>/usr/iop/current/spark2-client</value>
+    <display-name>spark_home</display-name>
+    <description>SPARK_HOME for kernels</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>spark_sql_warehouse_dir</name>
+    <value>/apps/jnbg/spark-warehouse</value>
+    <display-name>spark.sql.warehouse.dir</display-name>
+    <description>Warehouse for Notebook applications</description>
+  </property>
+  <property>
+    <name>jkg_port</name>
+    <value>8888</value>
+    <display-name>KernelGatewayApp.port</display-name>
+    <description>Jupyter Kernel Gateway port</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>jkg_loglevel</name>
+    <value>INFO</value>
+    <display-name>Application.log_level</display-name>
+    <description>Jupyter Kernel Gateway Log level</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>jkg_pid_dir_prefix</name>
+    <value>/var/run/jnbg</value>
+    <display-name>JNBG pid directory prefix</display-name>
+    <description>JNBG pid directory prefix for storing process ID</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>jkg_log_dir</name>
+    <value>/var/log/jnbg</value>
+    <display-name>Kernel Gateway log directory</display-name>
+    <description>Jupyter Kernel Gateway logfile directory</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>max_kernels</name>
+    <value>15</value>
+    <display-name>Maximum number of kernel instances</display-name>
+    <description>
+      Limits the number of kernel instances allowed to run by this gateway.
+      Unbounded by default.
+
+      Note: Number of kernel instances is also affected by the Spark2 property spark.port.maxRetries. Increase spark.port.maxRetries from its default value to a much higher value to enable controlling the number of active kernel instances using max_kernels.
+    </description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>cull_idle_kernel_period</name>
+    <value>43200</value>
+    <display-name>Idle kernel culling period</display-name>
+    <description>Period in seconds kernel can idle before being culled</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>cull_idle_kernel_interval</name>
+    <value>300</value>
+    <display-name>Idle kernel culling interval</display-name>
+    <description>Check for idle kernels to cull every specified number of seconds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>python_interpreter_path</name>
+    <value>/usr/bin/python</value>
+    <display-name>Python interpreter path</display-name>
+    <description>
+      PYTHON_EXE for virtualenv
+      Python interpreter must be version 2.7.x
+    </description>
+    <value-attributes>
+      <type>file</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>python_virtualenv_path_prefix</name>
+    <value>/var/lib/jnbg</value>
+    <display-name>Python virtualenv path prefix</display-name>
+    <description>
+      Python virtualenv path prefix
+      $VIRTUAL_ENV=python_virtualenv_path_prefix/python2.7
+    </description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>python_virtualenv_restrictive</name>
+    <value>true</value>
+    <display-name>Python virtualenv restrictive mode</display-name>
+    <description> 
+      Python virtualenv restrictive mode.
+      Check for restrictive mode so that service users cannot modify it.
+      Uncheck so that service users can install packages with "pip install ..."
+    </description>
+    <final>true</final>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>pythonpath</name>
+    <value>/usr/iop/current/spark2-client/python:/usr/iop/current/spark2-client/python/lib/pyspark.zip:/usr/iop/current/spark2-client/python/lib/py4j-0.10.4-src.zip</value>
+    <display-name>PYTHONPATH</display-name>
+    <description>PYTHONPATH for PySpark kernel</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>spark_conf_dir</name>
+    <value>/var/lib/jnbg/conf</value>
+    <display-name>SPARK_CONF_DIR</display-name>
+    <description>Spark configuration directory, currently only contains log4j.properties (see "-Dlog4j.configuration=file:/var/lib/jnbg/conf/log4j.properties" in spark_opts)</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
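
On the script side these properties surface through Ambari's config dictionary. A typical params.py fragment reading a few of the values defined above (illustrative, not the params file added by this commit):

from resource_management.libraries.script.script import Script

config = Script.get_config()
jnbg_env = config['configurations']['jnbg-env']

jkg_port = jnbg_env['jkg_port']            # defaults to 8888 per the XML above
jkg_loglevel = jnbg_env['jkg_loglevel']    # defaults to INFO
notebook_user = jnbg_env['notebook_user']
spark_home = jnbg_env['spark_home']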

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/kerberos.json b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/kerberos.json
new file mode 100755
index 0000000..8777709
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/kerberos.json
@@ -0,0 +1,59 @@
+{
+  "services": [
+    {
+      "name": "JNBG",
+      "identities": [
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "components": [
+        {
+          "name": "KERNEL_GATEWAY",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "jnbg_principal",
+              "principal": {
+                "value": "${jnbg-env/notebook_user}/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "jnbg-env/jnbg.service.kerberos.principal",
+                "local_username" : "${jnbg-env/notebook_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/jnbg.service.keytab",
+                "owner": {
+                  "name": "${jnbg-env/notebook_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "jnbg-env/jnbg.service.kerberos.keytab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "PYTHON_CLIENT",
+          "identities": [
+            {
+              "name": "/JNBG/KERNEL_GATEWAY/jnbg_principal"
+            }
+          ]
+        }
+      ],
+
+      "configurations": [
+        {
+          "jnbg-env": {
+            "jnbg.kerberos.enabled": "true"
+          }
+        }
+      ]
+    }
+  ]
+}
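
The ${...} placeholders in the principal are expanded by Ambari's Kerberos descriptor processing. As a rough illustration of the substitution only (Ambari's real resolution is more involved; values here are assumed):

import re

def resolve(template, configs, realm, host):
  # Handle ${config-type/property} lookups plus the _HOST and ${realm} specials.
  out = re.sub(r"\$\{([\w.-]+)/([\w.-]+)\}",
               lambda m: configs[m.group(1)][m.group(2)], template)
  return out.replace("${realm}", realm).replace("_HOST", host)

configs = {"jnbg-env": {"notebook_user": "notebook"}}
print(resolve("${jnbg-env/notebook_user}/_HOST@${realm}",
              configs, "EXAMPLE.COM", "nb1.example.com"))
# -> notebook/nb1.example.com@EXAMPLE.COM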

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/metainfo.xml b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/metainfo.xml
new file mode 100755
index 0000000..5afe904
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/metainfo.xml
@@ -0,0 +1,108 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>JNBG</name>
+      <displayName>JNBG</displayName>
+      <comment>Jupyter Notebook Kernel Gateway with Apache Toree</comment>
+      <version>0.2.0</version>
+      <components>
+        <component>
+          <name>KERNEL_GATEWAY</name>
+          <displayName>Jupyter Kernel Gateway</displayName>
+          <category>MASTER</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <commandScript>
+            <script>scripts/jkg_toree.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>3000</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>JNBG/PYTHON_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SPARK/SPARK2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+        <component>
+          <name>PYTHON_CLIENT</name>
+          <displayName>Python Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>false</versionAdvertised>
+          <commandScript>
+            <script>scripts/py_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>3000</timeout>
+          </commandScript>
+          <dependencies>
+            <dependency>
+              <name>SPARK/SPARK2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+        </component>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>SPARK2</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+        <config-type>jnbg-env</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterChange>true</restartRequiredAfterChange>
+    </service>
+  </services>
+</metainfo>
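
Each commandScript above names a Script subclass with the standard Ambari life-cycle methods. A minimal sketch of that shape (not the actual contents of scripts/jkg_toree.py; the pid-file attribute is an assumption):

from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.check_process_status import check_process_status

class JupyterKernelGateway(Script):
  def install(self, env):
    self.install_packages(env)
    self.configure(env)

  def configure(self, env):
    import params
    env.set_params(params)

  def start(self, env, upgrade_type=None):
    self.configure(env)
    # launch package/files/jkg_start.sh here, which writes the pid file

  def stop(self, env, upgrade_type=None):
    import params
    env.set_params(params)

  def status(self, env):
    import status_params
    check_process_status(status_params.jkg_pid_file)  # assumed attribute name

if __name__ == "__main__":
  JupyterKernelGateway().execute()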

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_install.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_install.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_install.sh
new file mode 100755
index 0000000..2027c9f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_install.sh
@@ -0,0 +1,169 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+PY_EXEC=$1
+PY_VENV_PATH_PREFIX=$2
+PY_VENV_OWNER=$3
+KINIT_CMD=$4
+
+checkPipInstall()
+{
+  pip show $1 > /dev/null 2>&1
+}
+
+checkSuccess()
+{
+  if [ $? != 0 ]; then
+    set +x
+    echo "Error encountered at line $1 while attempting to: "
+    if [ -n "$2" ]; then
+      echo $2
+    fi
+    echo Exiting.
+    exit 1
+  fi
+  set -x
+}
+
+# /etc/pip.conf overrides all
+if [ -f /etc/pip.conf ]; then
+  PYPI_URL=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}')
+  PYPI_HOST=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}' | sed -e 's/^.*\/\///' | sed -e 's/:.*$//')
+  PYPI_PORT=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}'  | sed -e 's/^.*:*://' | sed -e 's/\/.*$//')
+else
+  # If no pip.conf then try to determine based on repo URLs in use
+  if [ -f /etc/yum.repos.d/IOP.repo ]; then
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w http
+    httpurl=$?
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w https
+    httpsurl=$?
+    if [ "$httpurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=http:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    elif [ "$httpsurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=https:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    fi
+  else
+    # fallback default
+    PYPI_HOST=ibm-open-platform.ibm.com
+    PYPI_PORT=8080
+    PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+  fi
+fi
+
+if [[ -z "${PYPI_URL}" || -z "${PYPI_HOST}" || -z "${PYPI_PORT}" ]];then
+  PYPI_HOST=ibm-open-platform.ibm.com
+  PYPI_PORT=8080
+  PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+fi
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Installation failed; Install Python 2.7 using Red Hat Software Collections and retry."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+      # uninstall older pip version that accompanies SCL
+      pip uninstall -y pip
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Installation failed; Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+easy_install pip
+checkSuccess $LINENO "-  easy_install pip"
+pip -V
+
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple virtualenv --upgrade
+checkPipInstall virtualenv
+checkSuccess $LINENO "-  pip install virtualenv"
+
+if [ -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  # Warning only to tolerate pre-existing virtual env. from failed installs
+  echo "Installation warning: ${PY_VENV_PATH_PREFIX}/python2.7 exists."
+  echo "This might indicate remnants from a prior or failed installation."
+  echo "Check specified property value for python_virtualenv_path_prefix."
+fi
+
+if [ ! -x "${PY_EXEC}" ]; then
+  echo "Installation failed: ${PY_EXEC} does not appear to be a valid python executable; Use a different python_interpreter_path."
+  exit 1
+fi
+
+virtualenv -p ${PY_EXEC} ${PY_VENV_PATH_PREFIX}/python2.7
+checkSuccess $LINENO "-  create virtualenv using ${PY_EXEC}"
+
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+pip -V
+
+if [ "$rhver" -eq 6 ]; then
+  if [ "$rhscl" -eq 1 ]; then
+    pip -V
+    # uninstall older pip version that accompanies virtualenv with SCL
+    pip uninstall -y pip
+    easy_install pip
+    checkPipInstall pip
+    checkSuccess $LINENO "- easy_install pip"
+  fi
+fi
+
+pip -V
+# Use --index-url and not --extra-index-url as we are trying to install
+# specific package versions
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple setuptools --upgrade
+checkPipInstall setuptools
+checkSuccess $LINENO "- pip install setuptools"
+
+# Using --upgrade enables updating missing dependencies after failed installs
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple/ jupyter_kernel_gateway --upgrade
+checkPipInstall jupyter_kernel_gateway
+checkSuccess $LINENO "- pip install jupyter_kernel_gateway"
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
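
The /etc/pip.conf detection block above recurs in several of these scripts. The same extraction is easier to follow in Python (equivalent logic for reference, not part of the commit):

try:
  from urllib.parse import urlparse   # Python 3
except ImportError:
  from urlparse import urlparse       # Python 2

def pypi_from_pip_conf(path="/etc/pip.conf"):
  # Mirror the grep/awk/sed chain: pull apart the extra-index-url value.
  with open(path) as f:
    for raw in f:
      line = raw.strip()
      if line.lower().startswith("extra-index-url"):
        url = line.split("=", 1)[1].strip()
        parsed = urlparse(url)
        return url, parsed.hostname, parsed.port
  return None, None, None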

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_start.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_start.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_start.sh
new file mode 100755
index 0000000..fdc9e59
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/jkg_start.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+START_CMD=$1
+SPARK_HOME=$2
+PY_EXEC=$3
+PY_VENV_PATH_PREFIX=$4
+KINIT_CMD=$5
+LOG=$6
+PIDFILE=$7
+
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Detected invalid installation state: Install Python 2.7 using Red Hat Software Collections and try reinstalling the service."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+      # uninstall older pip version that accompanies SCL
+      pip uninstall -y pip
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Detected invalid installation state: Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+if [ ! -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  echo "Did not find necessary virtual environment to execute service startup. This state in unexpected and inconsistent when the service is in the INSTALLED state. Delete the service and reinstall."
+  exit 1
+fi
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+
+# Required for supporting Python 2 kernel
+export PYTHONPATH=${SPARK_HOME}/python/lib/pyspark.zip:${SPARK_HOME}/python:${SPARK_HOME}/python/lib/py4j-0.10.4-src.zip
+
+export SPARK_CONF_DIR=$SPARK_HOME/conf
+source $SPARK_CONF_DIR/spark-env.sh
+set +x
+eval "$START_CMD >> $LOG 2>&1 &"
+if [ $? -eq 0 ]; then
+  echo $! > $PIDFILE
+  exit 0
+fi
+exit 1
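
The pid file written just before exit is what the component's status() check keys on later. The conventional Ambari check is short (sketch; the pid-file path is an assumption derived from jkg_pid_dir_prefix above):

from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.core.exceptions import ComponentIsNotRunning

def is_running(pid_file="/var/run/jnbg/jupyter_kernel_gateway.pid"):
  try:
    check_process_status(pid_file)  # raises ComponentIsNotRunning if missing/stale
    return True
  except ComponentIsNotRunning:
    return False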

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/log4j_setup.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/log4j_setup.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/log4j_setup.sh
new file mode 100755
index 0000000..921045d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/log4j_setup.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+SPARK_CONFIG_DIR=$1
+
+log4j_properties_file="${SPARK_CONFIG_DIR}/log4j.properties"
+
+cat <<'EOF' > "${log4j_properties_file}"
+
+# default log file, overridden by Java System property -Dlog4j.logFile=...
+log4j.logFile=/var/log/jnbg/spark-driver-${user.name}.log
+
+# default (root) log level, overridable by Java System property -Dlog4j.logLevel=...
+log4j.logLevel=INFO
+
+# default log file size limit, overridable by Java System property -Dlog4j.fileSize=... (KB, MB, GB)
+log4j.fileSize=10MB
+
+# default max number of log file backups, overridable by Java System property -Dlog4j.backupFiles=...
+log4j.backupFiles=10
+
+# log to file using rolling log strategy with one backup file
+# NOTE: Spark REPL overrides rootCategory, set log4j.logLevel above
+log4j.rootCategory=${log4j.logLevel}, logfile
+log4j.appender.logfile=org.apache.log4j.RollingFileAppender
+log4j.appender.logfile.File=${log4j.logFile}
+log4j.appender.logfile.MaxFileSize=${log4j.fileSize}
+log4j.appender.logfile.MaxBackupIndex=${log4j.backupFiles}
+log4j.appender.logfile.encoding=UTF-8
+log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
+log4j.appender.logfile.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
+
+# Reduce Toree related "noise"
+log4j.logger.org.apache.toree.kernel.protocol.v5.stream.KernelOutputStream=ERROR
+
+# Modified Spark 2.1 default settings:
+
+# Spark overrides rootCategory level with the level set for the Scala & PySpark REPLs (default=WARN)
+# This is intended to reduce log verbosity while working with a Spark shell or PySpark shell.
+# However, notebook kernels internally use the spark-shell and pyspark shell implementation, but
+# since notebooks are logging to a log file, we want potentially more verbose logs.
+# We need to set the spark-shell and pyspark shell log level to the same level as the rootCategory.
+# See: org.apache.spark.internal.Logging#initializeLogging(isInterpreter=true)
+log4j.logger.org.apache.spark.repl.Main=${log4j.rootCategory}
+log4j.logger.org.apache.spark.api.python.PythonGatewayServer=${log4j.rootCategory}
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.spark_project.jetty=WARN
+log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO
+log4j.logger.org.apache.parquet=ERROR
+log4j.logger.parquet=ERROR
+
+# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
+log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+
+EOF

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pyspark_configure.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pyspark_configure.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pyspark_configure.sh
new file mode 100755
index 0000000..59cd28d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pyspark_configure.sh
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+PY_EXEC=$1
+PY_VENV_PATH_PREFIX=$2
+PY_VENV_OWNER=$3
+KINIT_CMD=$4
+SPARK_HOME=$5
+PYTHONPATH=$6
+SPARK_OPTS=$7
+
+if [ ! -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  echo "Unexpected state of installation. No Python client installation detected while trying to install PySpark kernel."
+  exit 0
+fi
+
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+
+if [ -z "${VIRTUAL_ENV}" ]; then
+  echo "Unexpected condition detected; Unable to find virtualenv environment."
+  exit 1
+fi
+
+# assume --sys-prefix used for Toree kernel installs
+kernel_dir=${VIRTUAL_ENV}/share/jupyter/kernels/spark_2.1_python2
+kernel_run_file=$kernel_dir/bin/run.sh
+kernel_json_file=$kernel_dir/kernel.json
+
+mkdir -p $kernel_dir/bin
+rm -f $kernel_json_file $kernel_run_file
+
+cat <<'EOF' >> $kernel_run_file
+#!/usr/bin/env bash
+echo
+echo "Starting Python 2 kernel with Spark 2.1 for user ${KERNEL_USERNAME}"
+echo
+
+CONF_ARGS="--name '${KERNEL_USERNAME:-Notebook} Python' \
+           --conf spark.sql.catalogImplementation=in-memory"
+
+PYSPARK_SUBMIT_ARGS="${CONF_ARGS} ${PYSPARK_SUBMIT_ARGS}"
+
+# replace generic log file name with user-specific log file name based on authenticated end-user
+PYSPARK_SUBMIT_ARGS="${PYSPARK_SUBMIT_ARGS//spark-driver-USER.log/spark-driver-${KERNEL_USERNAME:-all}.log}"
+
+echo "PYSPARK_SUBMIT_ARGS=\"${PYSPARK_SUBMIT_ARGS}\""
+
+EOF
+
+# For kerberized clusters
+if [ -n "${KINIT_CMD}" ]; then
+  sed -i "$ a ${KINIT_CMD}\n" $kernel_run_file
+fi
+
+sed -i "$ a ${PY_VENV_PATH_PREFIX}/python2.7/bin/python2 -m ipykernel -f \${2}" $kernel_run_file
+
+chmod 755 $kernel_run_file
+
+# Escape double-quotes in the user specified SPARK_OPTS value
+SPARK_OPTS="${SPARK_OPTS//\"/\\\"}"
+
+cat <<EOF >> $kernel_json_file
+{
+  "language": "python",
+  "display_name": "Spark 2.1 - Python 2",
+  "env": {
+    "SPARK_HOME": "${SPARK_HOME}",
+    "PYTHONPATH": "${PYTHONPATH}",
+    "PYTHONSTARTUP": "${SPARK_HOME}/python/pyspark/shell.py",
+    "PYSPARK_SUBMIT_ARGS": "${SPARK_OPTS} pyspark-shell"
+  },
+  "argv": [
+    "$kernel_run_file",
+    "-f",
+    "{connection_file}"
+  ]
+}
+EOF
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
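
The heredoc above writes kernel.json by hand, which is easy to break with unescaped quotes. An equivalent that stays valid JSON by construction (sketch; arguments mirror the script's positional parameters):

import json
import os

def write_kernel_json(kernel_dir, run_file, spark_home, pythonpath, spark_opts):
  spec = {
    "language": "python",
    "display_name": "Spark 2.1 - Python 2",
    "env": {
      "SPARK_HOME": spark_home,
      "PYTHONPATH": pythonpath,
      "PYTHONSTARTUP": os.path.join(spark_home, "python/pyspark/shell.py"),
      "PYSPARK_SUBMIT_ARGS": spark_opts + " pyspark-shell",
    },
    "argv": [run_file, "-f", "{connection_file}"],
  }
  with open(os.path.join(kernel_dir, "kernel.json"), "w") as f:
    json.dump(spec, f, indent=2)  # json.dump handles quote escaping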

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pythonenv_setup.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pythonenv_setup.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pythonenv_setup.sh
new file mode 100755
index 0000000..5b2b7d9
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/pythonenv_setup.sh
@@ -0,0 +1,138 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+PY_EXEC=$1
+PY_VENV_PATH_PREFIX=$2
+PY_VENV_OWNER=$3
+
+checkPipInstall()
+{
+  pip show $1 > /dev/null 2>&1
+}
+
+checkSuccess()
+{
+  if [ $? != 0 ]
+  then
+    set +x
+    echo "Error encountered at line $1 while attempting to: "
+    if [ -n "$2" ]
+    then
+      echo $2
+    fi
+    echo Exiting.
+    exit 1
+  fi
+  set -x
+}
+
+if [ -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  echo "Python client installation detected. Nothing to do."
+  exit 0
+fi
+
+# /etc/pip.conf overrides all
+if [ -f /etc/pip.conf ]; then
+  PYPI_URL=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}')
+  PYPI_HOST=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}' | sed -e 's/^.*\/\///' | sed -e 's/:.*$//')
+  PYPI_PORT=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}'  | sed -e 's/^.*:*://' | sed -e 's/\/.*$//')
+else
+  # If no pip.conf then try to determine based on repo URLs in use
+  if [ -f /etc/yum.repos.d/IOP.repo ]; then
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w http
+    httpurl=$?
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w https
+    httpsurl=$?
+    if [ "$httpurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=http:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    elif [ "$httpsurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=https:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    fi
+  else
+    # fallback default
+    PYPI_HOST=ibm-open-platform.ibm.com
+    PYPI_PORT=8080
+    PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+  fi
+fi
+
+if [[ -z "${PYPI_URL}" || -z "${PYPI_HOST}" || -z "${PYPI_PORT}" ]];then
+  PYPI_HOST=ibm-open-platform.ibm.com
+  PYPI_PORT=8080
+  PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+fi
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Installation failed; Install Python 2.7 using Red Hat Software Collections and retry."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      # enable the Red Hat Software Collections Python 2.7 environment
+      source /opt/rh/python27/enable
+      # uninstall older pip version that accompanies SCL
+      pip uninstall -y pip
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Installation failed; Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+easy_install pip
+checkSuccess $LINENO "-  easy_install pip"
+pip -V
+
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple virtualenv --upgrade
+checkPipInstall virtualenv
+checkSuccess $LINENO "-  pip install virtualenv"
+
+virtualenv -p ${PY_EXEC} ${PY_VENV_PATH_PREFIX}/python2.7
+checkSuccess $LINENO "-  create virtualenv using ${PY_EXEC}"
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
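
A note on the PYPI_* derivation above: the grep/sed/awk pipeline extracts the host and port of the extra-index-url entry in /etc/pip.conf, falling back to the IBM Open Platform mirror. A minimal Python sketch of the same parsing, for illustration only (the sample pip.conf line is an assumption, not taken from a real cluster):

    # Illustrative re-implementation of the PYPI_HOST/PYPI_PORT extraction above.
    try:
        from urllib.parse import urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse      # Python 2, the runtime these scripts target

    def parse_extra_index(pip_conf_text):
        for line in pip_conf_text.splitlines():
            if line.strip().lower().startswith('extra-index-url'):
                url = line.split('=', 1)[1].strip()
                parsed = urlparse(url)
                return url, parsed.hostname, parsed.port
        # fallback default, same as the script
        return 'http://ibm-open-platform.ibm.com:8080/simple/', 'ibm-open-platform.ibm.com', 8080

    print(parse_extra_index('extra-index-url = http://repo.example.com:8080/simple/'))
    # ('http://repo.example.com:8080/simple/', 'repo.example.com', 8080)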

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_configure.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_configure.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_configure.sh
new file mode 100755
index 0000000..8f4cbb3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_configure.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+NBX_USER=$1
+PY_EXEC=$2
+PY_VENV_PATH_PREFIX=$3
+PY_VENV_OWNER=$4
+KINIT_CMD=$5
+SPARK_HOME=$6
+TOREE_INTERPRETERS=$7
+TOREE_OPTS=${8:-""}
+SPARK_OPTS=$9
+
+checkSuccess()
+{
+  if [ $? != 0 ]
+  then
+    set +x
+    echo "Error encountered at line $1 while attempting to: "
+    if [ -n "$2" ]
+    then
+      echo $2
+    fi
+    echo Exiting.
+    exit 1
+  fi
+  set -x
+}
+
+# /etc/pip.conf overrides all
+if [ -f /etc/pip.conf ]; then
+  PYPI_URL=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}')
+  PYPI_HOST=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}' | sed -e 's/^.*\/\///' | sed -e 's/:.*$//')
+  PYPI_PORT=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}'  | sed -e 's/^.*:*://' | sed -e 's/\/.*$//')
+else
+  # If no pip.conf then try to determine based on repo URLs in use
+  if [ -f /etc/yum.repos.d/IOP.repo ]; then
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w http
+    httpurl=$?
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w https
+    httpsurl=$?
+    if [ "$httpurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=http:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    elif [ "$httpsurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=https:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    fi
+  else
+    # fallback default
+    PYPI_HOST=ibm-open-platform.ibm.com
+    PYPI_PORT=8080
+    PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+  fi
+fi
+
+if [[ -z "${PYPI_URL}" || -z "${PYPI_HOST}" || -z "${PYPI_PORT}" ]];then
+  PYPI_HOST=ibm-open-platform.ibm.com
+  PYPI_PORT=8080
+  PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+fi
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Configuration failed; Expected Python 2.7 from Red Hat Software Collections was not found."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Configuration failed; Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+if [ ! -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  echo "Configuration failed as the virtualenv ${PY_VENV_PATH_PREFIX}/python2.7 was not found; Ensure that the installation was successful."
+  exit 1
+fi
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+pip -V
+
+if [ -z "${TOREE_OPTS}" ]; then
+  jupyter toree install --sys-prefix --spark_home=${SPARK_HOME} --kernel_name='Spark 2.1' --interpreters=${TOREE_INTERPRETERS} "--spark_opts=${SPARK_OPTS}"
+  checkSuccess $LINENO "-  jupyter toree install"
+else
+  jupyter toree install --sys-prefix --spark_home=${SPARK_HOME} --kernel_name='Spark 2.1' --interpreters=${TOREE_INTERPRETERS} "--toree_opts=${TOREE_OPTS}" "--spark_opts=${SPARK_OPTS}"
+  checkSuccess $LINENO "-  jupyter toree install"
+fi
+
+# Note the value of --kernel_name and --interpreters from the toree install command determines the kernel directory
+# i.e. --kernel_name='Spark 2.1' --interpreters='Scala' --> .../jupyter/kernels/spark_2.1_scala/
+kernel_dir=${PY_VENV_PATH_PREFIX}/python2.7/share/jupyter/kernels/spark_2.1_scala
+kernel_run_file=$kernel_dir/bin/run.sh
+
+# Include the end-user name for spark-submit application name (KERNEL_USERNAME env var set by nb2kg)
+sed -i "s/--name \"'Apache Toree'\"/--name \"'\${KERNEL_USERNAME:-Notebook} Scala'\"/" $kernel_run_file
+
+# Replace log file path in SPARK_OPTS
+sed -i "/eval exec/i SPARK_OPTS=\"\${SPARK_OPTS//spark-driver-USER.log/spark-driver-\${KERNEL_USERNAME:-all}.log}\"\n" $kernel_run_file
+
+# For kerberized clusters
+if [ -n "${KINIT_CMD}" ]; then
+  sed -i "/eval exec/i ${KINIT_CMD}\n" $kernel_run_file
+fi
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
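
The kernel-directory comment above implies a simple naming rule: the kernel name and interpreter are joined, lowercased, and spaces become underscores. A tiny sketch of that inferred mapping (an assumption based on the example in the comment, not Toree's actual code):

    def kernel_dir_name(kernel_name, interpreter):
        # 'Spark 2.1' + 'Scala' -> 'spark_2.1_scala', per the comment above
        return ('%s_%s' % (kernel_name, interpreter)).lower().replace(' ', '_')

    assert kernel_dir_name('Spark 2.1', 'Scala') == 'spark_2.1_scala'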

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_install.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_install.sh b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_install.sh
new file mode 100755
index 0000000..7967105
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/files/toree_install.sh
@@ -0,0 +1,176 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+set -x
+
+PY_EXEC=$1
+PY_VENV_PATH_PREFIX=$2
+PY_VENV_OWNER=$3
+KINIT_CMD=$4
+SPARK_HOME=$5
+SPARK_OPTS=$6
+
+checkPipInstall()
+{
+  pip show $1 > /dev/null 2>&1
+}
+
+checkSuccess()
+{
+  if [ $? != 0 ]
+  then
+    set +x
+    echo "Error encountered at line $1 while attempting to: "
+    if [ -n "$2" ]
+    then
+      echo $2
+    fi
+    echo Exiting.
+    exit 1
+  fi
+  set -x
+}
+
+# /etc/pip.conf overrides all
+if [ -f /etc/pip.conf ]; then
+  PYPI_URL=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}')
+  PYPI_HOST=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}' | sed -e 's/^.*\/\///' | sed -e 's/:.*$//')
+  PYPI_PORT=$(cat  /etc/pip.conf | grep -i extra-index-url | awk '{print $3}'  | sed -e 's/^.*:*://' | sed -e 's/\/.*$//')
+else
+  # If no pip.conf then try to determine based on repo URLs in use
+  if [ -f /etc/yum.repos.d/IOP.repo ]; then
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w http
+    httpurl=$?
+    cat /etc/yum.repos.d/IOP.repo | grep baseurl |  grep -w https
+    httpsurl=$?
+    if [ "$httpurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=http:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    elif [ "$httpsurl" -eq 0 ]; then
+      PYPI_HOST=$(cat /etc/yum.repos.d/IOP.repo | grep baseurl | sed -e 's/baseurl=https:\/\///' | cut -f1 -d"/")
+      PYPI_PORT=8080
+      PYPI_URL=http://${PYPI_HOST}:${PYPI_PORT}/simple/
+    fi
+  else
+    # fallback default
+    PYPI_HOST=ibm-open-platform.ibm.com
+    PYPI_PORT=8080
+    PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+  fi
+fi
+
+if [[ -z "${PYPI_URL}" || -z "${PYPI_HOST}" || -z "${PYPI_PORT}" ]];then
+  PYPI_HOST=ibm-open-platform.ibm.com
+  PYPI_PORT=8080
+  PYPI_URL=http://ibm-open-platform.ibm.com:8080/simple/
+fi
+
+PLATFORM=`uname -p`
+rhver=7
+
+if [ "$PY_EXEC" = "/opt/rh/python27/root/usr/bin/python" ]; then
+  rhscl=1
+else
+  rhscl=0
+fi
+
+if [ "$PLATFORM" == "x86_64" ]
+then
+  if [ -x /usr/bin/lsb_release ]; then
+    rhver=$(/usr/bin/lsb_release -rs | cut -f1 -d.)
+  fi
+
+  if [ "$rhver" -eq 6 ];then
+    if [ "$rhscl" -eq 1 ] && [ ! -f /opt/rh/python27/enable ]; then
+      echo "Installation failed; Install Python 2.7 using Red Hat Software Collections and retry."
+      exit 1
+    elif [ "$rhscl" -eq 1 ]; then
+      source /opt/rh/python27/enable
+      # uninstall older pip version that accompanies SCL
+      pip uninstall -y pip
+    fi
+  fi
+fi
+
+pyver=`echo $(${PY_EXEC} -V 2>&1 | awk '{ print $2 }') | sed -e 's/\.//g'`
+if [ "$pyver" -lt 270 ]; then
+  echo "Installation failed; Ensure that the specified python_interpreter_path is Python version 2.7."
+  exit 1
+fi
+
+if [ ! -d "${PY_VENV_PATH_PREFIX}/python2.7" ]; then
+  easy_install pip
+  checkSuccess $LINENO "-  easy_install pip"
+
+  pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple virtualenv --upgrade
+  checkPipInstall virtualenv
+  checkSuccess $LINENO "-  pip install virtualenv"
+
+  virtualenv -p ${PY_EXEC} ${PY_VENV_PATH_PREFIX}/python2.7
+  checkSuccess $LINENO "-  create virtualenv using ${PY_EXEC}"
+fi
+source ${PY_VENV_PATH_PREFIX}/python2.7/bin/activate
+pip -V
+
+if [ "$rhver" -eq 6 ]; then
+  if [ "$rhscl" -eq 1 ]; then
+    pip -V
+    # uninstall older pip version that accompanies virtualenv with SCL
+    pip uninstall -y pip
+    easy_install pip
+    checkPipInstall pip
+    checkSuccess $LINENO "- easy_install pip"
+  fi
+fi
+
+# Use --index-url and not --extra-index-url as we are trying to install
+# specific package versions
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple/ setuptools --upgrade
+checkPipInstall setuptools
+checkSuccess $LINENO "- pip install setuptools"
+
+# Using --upgrade enables updating missing dependencies after failed installs
+pip install --trusted-host ${PYPI_HOST} --no-cache-dir --index-url http://${PYPI_HOST}:${PYPI_PORT}/simple/ toree --upgrade
+checkPipInstall toree
+checkSuccess $LINENO "- pip install toree"
+
+# Note the value of --kernel_name and --interpreters from the toree install command determines the kernel directory
+# i.e. --kernel_name='Spark 2.1' --interpreters='Scala' --> .../jupyter/kernels/spark_2.1_scala/
+kernel_dir=${PY_VENV_PATH_PREFIX}/python2.7/share/jupyter/kernels/spark_2.1_scala
+kernel_run_file=$kernel_dir/bin/run.sh
+
+# Include the end-user name for spark-submit application name (KERNEL_USERNAME env var set by nb2kg)
+sed -i "s/--name \"'Apache Toree'\"/--name \"'\${KERNEL_USERNAME:-Notebook} Scala'\"/" $kernel_run_file
+
+# Replace log file path in SPARK_OPTS
+sed -i "/eval exec/i SPARK_OPTS=\"\${SPARK_OPTS//spark-driver-USER.log/spark-driver-\${KERNEL_USERNAME:-all}.log}\"\n" $kernel_run_file
+
+# For kerberized clusters
+if [ -n "${KINIT_CMD}" ]; then
+  sed -i "/eval exec/i ${KINIT_CMD}\n" $kernel_run_file
+fi
+
+# Set ownership of the created virtualenv if configured via python_virtualenv_restrictive
+if [ "${PY_VENV_OWNER}" != "root" ]; then
+  echo ====== Virtualenv owner = $PY_VENV_OWNER =========
+  chown -R ${PY_VENV_OWNER}: ${PY_VENV_PATH_PREFIX}/python2.7
+fi
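
The comment above ("Use --index-url and not --extra-index-url") is the key detail: --extra-index-url leaves the default PyPI index in play, so pip may resolve a different version than the local mirror serves, while --index-url makes the mirror authoritative. A hedged sketch of issuing the same kind of pinned install from Python (host, port, and package values are placeholders):

    import subprocess

    def pip_install_pinned(package, host, port):
        # --index-url (not --extra-index-url) makes the mirror the ONLY index,
        # so the resolved version is whatever the mirror serves.
        cmd = ['pip', 'install',
               '--trusted-host', host,
               '--no-cache-dir',
               '--index-url', 'http://%s:%d/simple/' % (host, port),
               package, '--upgrade']
        return subprocess.call(cmd)

    # e.g. pip_install_pinned('toree', 'ibm-open-platform.ibm.com', 8080)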

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree.py
new file mode 100755
index 0000000..34bcfe1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os, errno
+from resource_management.core.exceptions import Fail
+from resource_management.core.logger import Logger
+from resource_management.core.resources.system import Execute, File, Directory
+from resource_management.core.source import StaticFile, InlineTemplate, Template
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+from resource_management.libraries.functions.check_process_status import check_process_status
+import jnbg_helpers as helpers
+
+class GatewayKernels(Script):
+  def install(self, env):
+    import jkg_toree_params as params
+    self.install_packages(env)
+
+    # Create user and group if they don't exist
+    helpers.create_linux_user(params.user, params.group)
+
+    # Create directories used by the service and service user
+    Directory([params.home_dir, params.jkg_pid_dir, params.log_dir, params.spark_config_dir],
+              mode=0755,
+              create_parents=True,
+              owner=params.user,
+              group=params.group,
+              recursive_ownership=True
+             )
+
+    if os.path.exists(params.py_venv_pathprefix):
+      Logger.warning("Virtualenv path prefix {0} to be used for JNBG service might already exist. "
+                     "This is unexpected if the service or service component is being installed on the node for the first time. "
+                     "It could indicate remnants from a prior installation.".format(params.py_venv_pathprefix))
+
+    # Setup bash scripts for execution
+    for sh_script in params.sh_scripts:
+      File(params.sh_scripts_dir + os.sep + sh_script,
+           content=StaticFile(sh_script),
+           mode=0750
+          )
+    for sh_script in params.sh_scripts_user:
+      File(params.sh_scripts_dir + os.sep + sh_script,
+           content=StaticFile(sh_script),
+           mode=0755
+          )
+
+    # Run install commands for JKG defined in params
+    for command in params.jkg_commands: Execute(command, logoutput=True)
+
+    # Run install commands for Toree defined in params
+    for command in params.toree_commands: Execute(command, logoutput=True)
+
+    # Run setup commands for log4j
+    for command in params.log4j_setup_commands: Execute(command, logoutput=True)
+
+    # Note that configure is done during startup
+
+  def stop(self, env):
+    import status_params as params
+    import jkg_toree_params as jkgparams
+    env.set_params(params)
+
+    helpers.stop_process(params.jkg_pid_file, jkgparams.user, jkgparams.log_dir)
+
+  def start(self, env):
+    import os, sys, time
+    import jkg_toree_params as params
+    env.set_params(params)
+    self.configure(env)
+    delay_checks = 8
+
+    # Need HDFS started for the next step
+    helpers.create_hdfs_dirs(params.user, params.group, params.dirs)
+
+    Execute(params.start_command, user=params.user, logoutput=True)
+    check_process_status(params.jkg_pid_file)
+
+    time.sleep(delay_checks)
+
+    with open(params.jkg_pid_file, 'r') as fp:
+      try:
+        os.kill(int(fp.read().strip()), 0)
+      except OSError as ose:
+        if ose.errno != errno.EPERM:
+          raise Fail("Error starting Jupyter Kernel Gateway. Check {0} for the possible cause.".format(params.log_dir + "/jupyter_kernel_gateway.log"))
+        else:
+          # non-root installs might have to fall back to the status check, but
+          # with the side effect that any error might only be reflected during
+          # the status check up to a minute later rather than immediately
+          check_process_status(params.jkg_pid_file)
+
+  def status(self, env):
+    import status_params as params
+    env.set_params(params)
+    check_process_status(params.jkg_pid_file)
+
+  def configure(self, env):
+    import jkg_toree_params as params
+    env.set_params(params)
+
+    # Create directories used by the service and service user
+    # if they were updated
+    Directory([params.home_dir, params.jkg_pid_dir, params.log_dir],
+              mode=0755,
+              create_parents=True,
+              owner=params.user,
+              group=params.group,
+              recursive_ownership=True)
+
+    # Run commands to configure Toree and PySpark
+    for command in params.toree_configure_commands: Execute(command, logoutput=True)
+    for command in params.pyspark_configure_commands: Execute(command, logoutput=True)
+
+if __name__ == "__main__":
+  GatewayKernels().execute()
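
The start() method above relies on the os.kill(pid, 0) idiom: signal 0 delivers nothing, it only checks that the pid exists and that the caller may signal it, which is why EPERM is treated as "alive". The idiom in isolation:

    import errno, os

    def pid_is_alive(pid):
        # Signal 0 sends no signal; it only performs the existence/permission check.
        try:
            os.kill(pid, 0)
        except OSError as ose:
            if ose.errno == errno.ESRCH:   # no such process
                return False
            if ose.errno == errno.EPERM:   # exists, but owned by another user
                return True
            raise
        return True

    print(pid_is_alive(os.getpid()))  # True for the current process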

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree_params.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree_params.py
new file mode 100755
index 0000000..13a8aba
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jkg_toree_params.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions import get_kinit_path
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+import jnbg_helpers as helpers
+
+# Server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+cluster_configs = config['clusterHostInfo']
+
+# Notebook service configs
+user = config['configurations']['jnbg-env']['notebook_user']
+group = config['configurations']['jnbg-env']['notebook_group']
+log_dir = config['configurations']['jnbg-env']['jkg_log_dir']
+jkg_pid_dir = config['configurations']['jnbg-env']['jkg_pid_dir_prefix']
+jkg_host = str(cluster_configs['kernel_gateway_hosts'][0])
+jkg_port = str(config['configurations']['jnbg-env']['jkg_port'])
+jkg_loglevel = str(config['configurations']['jnbg-env']['jkg_loglevel'])
+jkg_max_kernels = config['configurations']['jnbg-env']['max_kernels']
+jkg_cull_period = config['configurations']['jnbg-env']['cull_idle_kernel_period']
+jkg_cull_interval = config['configurations']['jnbg-env']['cull_idle_kernel_interval']
+py_executable = config['configurations']['jnbg-env']['python_interpreter_path']
+py_venv_pathprefix = config['configurations']['jnbg-env']['python_virtualenv_path_prefix']
+py_venv_restrictive = config['configurations']['jnbg-env']['python_virtualenv_restrictive']
+spark_sql_warehouse_dir = config['configurations']['jnbg-env']['spark_sql_warehouse_dir']
+pythonpath = config['configurations']['jnbg-env']['pythonpath']
+spark_home = format("{stack_root}/current/spark2-client")
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+#ui_ssl_enabled = config['configurations']['jnbg-env']['jnbg.ssl']
+ui_ssl_enabled = False
+spark_opts = str(config['configurations']['jnbg-env']['kernel_spark_opts'])
+modified_spark_opts = format("{spark_opts} --conf spark.sql.warehouse.dir={spark_sql_warehouse_dir}")
+modified_spark_opts = "'{0}'".format(modified_spark_opts)
+toree_opts = str(config['configurations']['jnbg-env']['toree_opts'])
+toree_opts = "'{0}'".format(toree_opts)
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+jkg_log_formatter_cmd = format("%(asctime)s,%(msecs)03d %(levelname)s %(name)s: %(message)s")
+jkg_log_formatter_cmd = "'{0}'".format(jkg_log_formatter_cmd)
+venv_owner="root" if py_venv_restrictive else user
+spark_config_dir = config['configurations']['jnbg-env']['spark_conf_dir']
+interpreters = "Scala"
+
+jnbg_kinit_cmd = ""
+if security_enabled:
+  _hostname_lowercase = config['hostname'].lower()
+  jnbg_kerberos_keytab = config['configurations']['jnbg-env']['jnbg.service.kerberos.keytab']
+  jnbg_kerberos_principal = config['configurations']['jnbg-env']['jnbg.service.kerberos.principal']
+  jnbg_kerberos_principal = jnbg_kerberos_principal.replace('_HOST',_hostname_lowercase)
+  jnbg_kinit_cmd = format("{kinit_path_local} -kt {jnbg_kerberos_keytab} {jnbg_kerberos_principal}; ")
+
+jnbg_kinit_arg = "'{0}'".format(jnbg_kinit_cmd)
+
+ambarisudo = AMBARI_SUDO_BINARY
+home_dir = format("/home/{user}")
+hdfs_home_dir = format("/user/{user}")
+jkg_pid_file = format("{jkg_pid_dir}/jupyter_kernel_gateway.pid")
+dirs = [(hdfs_home_dir, "0775"), (spark_sql_warehouse_dir, "01770")]
+package_dir = helpers.package_dir()
+sh_scripts_dir = format("{package_dir}files/")
+sh_scripts = ['jkg_install.sh',
+              'toree_install.sh',
+              'log4j_setup.sh',
+              'toree_configure.sh',
+              'pyspark_configure.sh',
+              'pythonenv_setup.sh']
+sh_scripts_user = ['jkg_start.sh']
+
+# Sequence of commands to be executed for JKG installation
+jkg_commands = []
+cmd_file_name = "jkg_install.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+jkg_commands.append(ambarisudo + ' ' +
+                    cmd_file_path + ' ' +
+                    py_executable + ' ' +
+                    py_venv_pathprefix + ' ' +
+                    venv_owner + ' ' +
+                    jnbg_kinit_arg)
+
+# Sequence of commands executed for Toree installation
+toree_commands = []
+cmd_file_name = "toree_install.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+toree_commands.append(ambarisudo + ' ' +
+                      cmd_file_path + ' ' +
+                      py_executable + ' ' +
+                      py_venv_pathprefix + ' ' +
+                      venv_owner + ' ' +
+                      jnbg_kinit_arg + ' ' +
+                      spark_home + ' ' +
+                      modified_spark_opts)
+
+# Sequence of commands executed for Toree configuration
+toree_configure_commands = []
+cmd_file_name = "toree_configure.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+toree_configure_commands.append(ambarisudo + ' ' +
+                                cmd_file_path + ' ' +
+                                user + ' ' +
+                                py_executable + ' ' +
+                                py_venv_pathprefix + ' ' +
+                                venv_owner + ' ' +
+                                jnbg_kinit_arg + ' ' +
+                                spark_home + ' ' +
+                                interpreters + ' ' +
+                                toree_opts + ' ' +
+                                modified_spark_opts)
+
+# Sequence of commands executed for PySpark kernel configuration
+pyspark_configure_commands = []
+cmd_file_name = "pyspark_configure.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+pyspark_configure_commands.append(ambarisudo + ' ' +
+                                  cmd_file_path + ' ' +
+                                  py_executable + ' ' +
+                                  py_venv_pathprefix + ' ' +
+                                  venv_owner + ' ' +
+                                  jnbg_kinit_arg + ' ' +
+                                  spark_home + ' ' +
+                                  pythonpath + ' ' +
+                                  modified_spark_opts)
+
+log4j_setup_commands = []
+cmd_file_name = "log4j_setup.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+log4j_setup_commands.append(ambarisudo + ' ' +
+                            cmd_file_path + ' ' +
+                            spark_config_dir)
+
+# JKG startup command
+start_args = ['"jupyter kernelgateway' +
+              ' --ip=' + '0.0.0.0' +
+              ' --port=' + jkg_port +
+              ' --port_retries=' + '0' +
+              ' --log-level=' + jkg_loglevel +
+              ' --KernelGatewayApp.max_kernels=' + jkg_max_kernels,
+              ' --KernelGatewayApp.cull_idle_kernel_period=' + jkg_cull_period,
+              ' --KernelGatewayApp.cull_idle_kernel_interval=' + jkg_cull_interval,
+              ' --KernelSpecManager.ensure_native_kernel=' + 'False',
+              ' --KernelGatewayApp.log_format=' + jkg_log_formatter_cmd,
+              ' --JupyterWebsocketPersonality.list_kernels=' + 'True "',
+              spark_home,
+              py_executable,
+              py_venv_pathprefix,
+              jnbg_kinit_arg,
+              log_dir + "/jupyter_kernel_gateway.log",
+              jkg_pid_file]
+
+cmd_file_name = "jkg_start.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+start_command = cmd_file_path + ' ' + ' '.join(start_args)
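
Several values above (toree_opts, modified_spark_opts, jnbg_kinit_arg, jkg_log_formatter_cmd) are wrapped in single quotes before the command string is handed to the shell, so values containing spaces travel as one argument. A small sketch of the effect, with a hypothetical spark_opts value; note this simple wrapping assumes the value itself contains no single quotes:

    def shell_quote(value):
        # Same approach as "'{0}'".format(value) above.
        return "'{0}'".format(value)

    spark_opts = '--master yarn --deploy-mode client'
    print('toree_install.sh ' + shell_quote(spark_opts))
    # toree_install.sh '--master yarn --deploy-mode client'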

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_helpers.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_helpers.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_helpers.py
new file mode 100755
index 0000000..4d126e3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_helpers.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os, pwd, grp
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.logger import Logger
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.get_user_call_output import get_user_call_output
+from resource_management.libraries.functions.show_logs import show_logs
+#from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from ambari_commons.constants import AMBARI_SUDO_BINARY
+
+def package_dir():
+  return os.path.realpath(__file__).split('/package')[0] + '/package/'
+
+def create_linux_user(user, group):
+  sudo = AMBARI_SUDO_BINARY
+
+  try: pwd.getpwnam(user)
+  except KeyError: Execute(format("{sudo} useradd ") + user, logoutput=True)
+  try: grp.getgrnam(group)
+  except KeyError: Execute(format("{sudo} groupadd ") + group, logoutput=True)
+
+def create_hdfs_dirs(user, group, dirs):
+  import jnbg_params as params
+  for dir, perms in dirs:
+    params.HdfsResource(dir,
+                        type = "directory",
+                        action = "create_on_execute",
+                        owner = user,
+                        group = group,
+                        mode = int(perms, 8)
+                       )
+  params.HdfsResource(None, action="execute")
+ 
+def stop_process(pid_file, user, log_dir):
+  """
+  Kill the process by pid file, then check the process is running or not.
+  If the process is still running after the kill command, try to kill
+  with -9 option (hard kill)
+  """
+
+  sudo = AMBARI_SUDO_BINARY
+  pid = get_user_call_output(format("cat {pid_file}"), user=user, is_checked_call=False)[1]
+  process_id_exists_command = format("ls {pid_file} >/dev/null 2>&1 && ps -p {pid} >/dev/null 2>&1")
+
+  kill_cmd = format("{sudo} kill {pid}")
+  Execute(kill_cmd, not_if=format("! ({process_id_exists_command})"))
+
+  wait_time = 5
+  hard_kill_cmd = format("{sudo} kill -9 {pid}")
+  Execute(hard_kill_cmd,
+          not_if=format("! ({process_id_exists_command}) || ( sleep {wait_time} && ! ({process_id_exists_command}) )"),
+          ignore_failures = True)
+
+  try:
+    Execute(format("! ({process_id_exists_command})"),
+            tries=20,
+            try_sleep=3)
+  except:
+    show_logs(log_dir, user)
+    raise
+
+  File(pid_file, action="delete")
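
stop_process() above escalates from a plain kill to kill -9 only if the process survives the grace period. The same pattern without the resource_management wrappers, as a plain-Python sketch:

    import errno, os, signal, time

    def stop_pid(pid, grace_seconds=5):
        """SIGTERM first; SIGKILL only if the pid is still alive after the
        grace period (mirrors the kill / kill -9 sequence above)."""
        try:
            os.kill(pid, signal.SIGTERM)
            time.sleep(grace_seconds)
            os.kill(pid, 0)                # still alive?
        except OSError as ose:
            if ose.errno == errno.ESRCH:   # already exited
                return
            raise
        os.kill(pid, signal.SIGKILL)       # hard kill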

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_params.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_params.py
new file mode 100755
index 0000000..82660ab
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/jnbg_params.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+import functools
+
+#for create_hdfs_directory
+
+# server configurations
+config = Script.get_config()
+
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hostname = config["hostname"]
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+dfs_type = default("/commandParams/dfs_type", "")
+
+# create partial functions with common arguments for every HdfsResource call
+# to create hdfs directory we need to import this and call HdfsResource in code
+
+HdfsResource = functools.partial(
+ HdfsResource,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  user = hdfs_user,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+ )
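
The functools.partial call above pre-binds the dozen cluster-wide arguments so that call sites (see create_hdfs_dirs in jnbg_helpers.py) pass only what varies per directory. The mechanics in isolation, with a toy stand-in for HdfsResource:

    import functools

    def make_resource(path, res_type=None, owner=None, mode=None, default_fs=None):
        # Toy stand-in for HdfsResource: echoes what it would act on.
        return (path, res_type, owner, mode, default_fs)

    # Pre-bind the argument that never changes, as done for HdfsResource above.
    Resource = functools.partial(make_resource, default_fs='hdfs://ns1')

    print(Resource('/user/notebook', res_type='directory', owner='notebook', mode=0o775))
    # ('/user/notebook', 'directory', 'notebook', 509, 'hdfs://ns1')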

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client.py
new file mode 100755
index 0000000..094edde
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+from resource_management.core.resources.system import Execute, File, Directory
+from resource_management.core.source import StaticFile, InlineTemplate, Template
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+import jnbg_helpers as helpers
+
+class PyClient(Script):
+  def install(self, env):
+    import py_client_params as params
+    from jkg_toree_params import user, group, sh_scripts_dir, sh_scripts, sh_scripts_user
+
+    # Setup bash scripts for execution
+    for sh_script in sh_scripts:
+      File(sh_scripts_dir + os.sep + sh_script,
+           content=StaticFile(sh_script),
+           mode=0750
+          )
+    for sh_script in sh_scripts_user:
+      File(sh_scripts_dir + os.sep + sh_script,
+           content=StaticFile(sh_script),
+           mode=0755
+          )
+
+    self.install_packages(env)
+    self.configure(env)
+
+    # Create user and group if they don't exist
+    helpers.create_linux_user(user, group)
+
+    # Run install commands for Python client defined in params
+    for command in params.commands: Execute(command, logoutput=True)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def configure(self, env):
+    import py_client_params as params
+    env.set_params(params)
+
+if __name__ == "__main__":
+  PyClient().execute()


[10/12] ambari git commit: AMBARI-21466. KNOX upgrade fails due to wrong stack root

Posted by jo...@apache.org.
AMBARI-21466. KNOX upgrade fails due to wrong stack root


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/a7b6d5a0
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/a7b6d5a0
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/a7b6d5a0

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: a7b6d5a0fc72c3d20c50210e887b7294007b0dba
Parents: 0cb9194
Author: Attila Doroszlai <ad...@hortonworks.com>
Authored: Thu Jul 13 12:54:47 2017 +0200
Committer: Attila Doroszlai <ad...@hortonworks.com>
Committed: Thu Jul 13 12:54:47 2017 +0200

----------------------------------------------------------------------
 .../libraries/functions/stack_tools.py                 | 13 +++++++++++++
 .../upgrades/ChangeStackReferencesAction.java          |  4 +++-
 .../KNOX/0.5.0.2.2/package/scripts/params_linux.py     |  8 ++++++++
 .../KNOX/0.5.0.2.2/package/scripts/upgrade.py          |  2 +-
 .../upgrades/ChangeStackReferencesActionTest.java      |  1 +
 5 files changed, 26 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
index 420ae11..830598b 100644
--- a/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
+++ b/ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py
@@ -120,3 +120,16 @@ def get_stack_root(stack_name, stack_root_json):
     return "/usr/{0}".format(stack_name.lower())
 
   return stack_root[stack_name]
+
+
+def get_stack_name(stack_formatted):
+  """
+  Get the stack name (eg. HDP) from formatted string that may contain stack version (eg. HDP-2.6.1.0-123)
+  """
+  if stack_formatted is None:
+    return None
+
+  if '-' not in stack_formatted:
+    return stack_formatted
+
+  return stack_formatted.split('-')[0]
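
Usage of the new helper, assuming the function above is in scope; the values follow its docstring:

    print(get_stack_name('HDP-2.6.1.0-123'))  # 'HDP'  (version suffix stripped)
    print(get_stack_name('HDP'))              # 'HDP'  (no '-', returned unchanged)
    print(get_stack_name(None))               # None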

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java
index d75d031..03e5caf 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesAction.java
@@ -35,6 +35,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Function;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
@@ -45,6 +46,7 @@ import com.google.common.collect.Sets;
 public class ChangeStackReferencesAction extends AbstractServerAction {
 
   private static final Logger LOG = LoggerFactory.getLogger(ChangeStackReferencesAction.class);
+  private static final Set<String> SKIP_PROPERTIES = ImmutableSet.of("cluster-env/stack_root");
   private static final Set<Map.Entry<String, String>> REPLACEMENTS = Maps.asMap(
     Sets.newHashSet("/usr/iop", "iop/apps", "iop.version", "IOP_VERSION"),
     new Function<String, String>() {
@@ -83,7 +85,7 @@ public class ChangeStackReferencesAction extends AbstractServerAction {
         for (Map.Entry<String, String> entry : properties.entrySet()) {
           String key = entry.getKey();
           String original = entry.getValue();
-          if (original != null) {
+          if (original != null && !SKIP_PROPERTIES.contains(configType + "/" + key)) {
             String replaced = original;
             for (Map.Entry<String, String> replacement : REPLACEMENTS) {
               replaced = replaced.replace(replacement.getKey(), replacement.getValue());
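
The crux of the Java change: every property value receives the IOP-to-HDP string replacements except keys on the new skip list, so cluster-env/stack_root keeps recording the old root for upgrade logic (such as the Knox data seeding below) to read. A compact Python sketch of that control flow, with an abbreviated replacement map:

    SKIP_PROPERTIES = {'cluster-env/stack_root'}
    REPLACEMENTS = {'/usr/iop': '/usr/hdp', 'iop/apps': 'hdp/apps'}  # abbreviated

    def change_references(config_type, properties):
        updated = {}
        for key, original in properties.items():
            if original is None or config_type + '/' + key in SKIP_PROPERTIES:
                updated[key] = original          # left untouched
                continue
            replaced = original
            for old, new in REPLACEMENTS.items():
                replaced = replaced.replace(old, new)
            updated[key] = replaced
        return updated

    print(change_references('cluster-env', {
        'stack_root': '/usr/iop',                # skipped, stays /usr/iop
        'pig_tar_source': '/usr/iop/current/pig-client/pig.tar.gz',
    }))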

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
index 5a2ef19..9b0bbfc 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/params_linux.py
@@ -28,6 +28,7 @@ from resource_management.libraries.functions.version import format_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions.get_port_from_url import get_port_from_url
 from resource_management.libraries.functions.get_stack_version import get_stack_version
+from resource_management.libraries.functions.stack_tools import get_stack_name, get_stack_root
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 from status_params import *
@@ -67,6 +68,13 @@ stack_supports_core_site_for_ranger_plugin = check_stack_feature(StackFeature.CO
 # DO NOT format it since we need the build number too.
 upgrade_from_version = default("/hostLevelParams/current_version", None)
 
+source_stack = default("/commandParams/source_stack", None)
+source_stack_name = get_stack_name(source_stack)
+if source_stack_name is not None and source_stack_name != stack_name:
+  source_stack_root = get_stack_root(source_stack_name, default('/configurations/cluster-env/stack_root', None))
+else:
+  source_stack_root = stack_root
+
 # server configurations
 # Default value used in HDP 2.3.0.0 and earlier.
 knox_data_dir = '/var/lib/knox/data'
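
In the BigInsights-to-HDP migration this fix targets, source_stack differs from stack_name, so source_stack_root resolves to the old IOP root rather than /usr/hdp. A hedged walk-through of the branch above; the source_stack value and the stack_root JSON are assumptions modeled on that migration:

    import json

    stack_root_json = json.dumps({'HDP': '/usr/hdp', 'BigInsights': '/usr/iop'})

    def get_stack_name(stack_formatted):            # as added in this commit
        if stack_formatted is None:
            return None
        return stack_formatted.split('-')[0]

    def get_stack_root(stack_name, root_json):      # simplified stand-in
        return json.loads(root_json)[stack_name]

    stack_name, stack_root = 'HDP', '/usr/hdp'
    source_stack = 'BigInsights-4.2.5'              # hypothetical /commandParams/source_stack
    source_stack_name = get_stack_name(source_stack)
    if source_stack_name is not None and source_stack_name != stack_name:
        source_stack_root = get_stack_root(source_stack_name, stack_root_json)
    else:
        source_stack_root = stack_root
    print(source_stack_root)  # /usr/iop: Knox data is seeded from the old root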

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
index 917f340..fa035c7 100644
--- a/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
+++ b/ambari-server/src/main/resources/common-services/KNOX/0.5.0.2.2/package/scripts/upgrade.py
@@ -91,7 +91,7 @@ def seed_current_data_directory():
     Logger.info("Seeding Knox data from prior version...")
 
     # <stack-root>/2.3.0.0-1234/knox/data/.
-    source_data_dir = os.path.join(params.stack_root, params.upgrade_from_version, "knox", "data", ".")
+    source_data_dir = os.path.join(params.source_stack_root, params.upgrade_from_version, "knox", "data", ".")
 
     # <stack-root>/current/knox-server/data
     target_data_dir = os.path.join(params.stack_root, "current", "knox-server", "data")

http://git-wip-us.apache.org/repos/asf/ambari/blob/a7b6d5a0/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java
index 592a95f..1104c96 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/serveraction/upgrades/ChangeStackReferencesActionTest.java
@@ -69,6 +69,7 @@ public class ChangeStackReferencesActionTest {
     originalProperties.put("mapreduce_tar_source", "/usr/iop/current/hadoop-client/mapreduce.tar.gz");
     originalProperties.put("pig_tar_destination_folder", "hdfs:///iop/apps/{{ stack_version }}/pig/");
     originalProperties.put("pig_tar_source", "/usr/iop/current/pig-client/pig.tar.gz");
+    originalProperties.put("stack_root", "/usr/iop");
     expect(clusterEnv.getProperties()).andReturn(originalProperties).anyTimes();
 
     // this is the crux of the test


[09/12] ambari git commit: AMBARI-21464 - Ranger is Missing from BigInsights to HDP Upgrade Packs (jonathanhurley)

Posted by jo...@apache.org.
AMBARI-21464 - Ranger is Missing from BigInsights to HDP Upgrade Packs (jonathanhurley)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/0cb9194f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/0cb9194f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/0cb9194f

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 0cb9194f568534f7dde7d881fc31f06a619759f9
Parents: 69e492f
Author: Jonathan Hurley <jh...@hortonworks.com>
Authored: Wed Jul 12 21:32:10 2017 -0400
Committer: Jonathan Hurley <jh...@hortonworks.com>
Committed: Wed Jul 12 21:32:10 2017 -0400

----------------------------------------------------------------------
 .../4.2.5/upgrades/config-upgrade.xml           |  68 +++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 164 ++++++++++++++++
 .../BigInsights/4.2/upgrades/config-upgrade.xml |  94 +++++++++
 .../upgrades/nonrolling-upgrade-to-hdp-2.6.xml  | 190 +++++++++++++++++++
 4 files changed, 516 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/0cb9194f/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
index b51a744..e33b8fb 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/config-upgrade.xml
@@ -63,6 +63,74 @@
       </component>
     </service>
 
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous">
+            <type>ranger-env</type>
+            <transfer operation="delete" delete-key="bind_anonymous" />
+          </definition>
+          <definition xsi:type="configure" id="admin_log4j_parameterize" summary="Parameterizing Ranger Log4J Properties">
+            <type>admin-log4j</type>
+            <set key="ranger_xa_log_maxfilesize" value="256"/>
+            <set key="ranger_xa_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxFileSize={{ranger_xa_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.xa_log_appender=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.xa_log_appender.MaxBackupIndex={{ranger_xa_log_maxbackupindex}}"/>
+          </definition>
+        </changes>
+      </component>
+      <component name="RANGER_USERSYNC">
+        <changes>
+          <definition xsi:type="configure" id="usersync_log4j_parameterize" summary="Parameterizing Ranger Usersync Log4J Properties">
+            <type>usersync-log4j</type>
+            <set key="ranger_usersync_log_maxfilesize" value="256"/>
+            <set key="ranger_usersync_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_usersync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_usersync_log_maxbackupindex}}"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade">
+            <type>ranger-ugsync-site</type>
+            <set key="ranger.usersync.ldap.deltasync" value="false"
+              if-type="ranger-ugsync-site" if-key="ranger.usersync.source.impl.class" if-value="org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder"/>
+          </definition>
+        </changes>
+      </component>
+      <component name="RANGER_TAGSYNC">
+        <changes>
+          <definition xsi:type="configure" id="tagsync_log4j_parameterize" summary="Parameterizing Ranger Tagsync Log4J Properties">
+            <type>tagsync-log4j</type>
+            <set key="ranger_tagsync_log_maxfilesize" value="256"/>
+            <set key="ranger_tagsync_log_number_of_backup_files" value="20"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxFileSize = {{ranger_tagsync_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.logFile=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.logFile.MaxBackupIndex = {{ranger_tagsync_log_number_of_backup_files}}"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+    <service name="RANGER_KMS">
+    <component name="RANGER_KMS_SERVER">
+      <changes>
+        <definition xsi:type="configure" id="kms_log4j_parameterize" summary="Parameterizing Ranger KMS Log4J Properties">
+          <type>kms-log4j</type>
+          <set key="ranger_kms_log_maxfilesize" value="256"/>
+          <set key="ranger_kms_log_maxbackupindex" value="20"/>
+          <set key="ranger_kms_audit_log_maxfilesize" value="256"/>
+          <set key="ranger_kms_audit_log_maxbackupindex" value="20"/>
+          <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB"/>
+          <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}"/>
+          <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB"/>
+          <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}"/>
+        </definition>
+        <definition xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl">
+          <type>ranger-kms-site</type>
+          <transfer operation="delete" delete-key="ranger.https.attrib.keystore.file" if-type="ranger-kms-site" if-key="ranger.service.https.attrib.keystore.file" if-key-state="present"/>
+          <transfer operation="delete" delete-key="ranger.service.https.attrib.clientAuth" if-type="ranger-kms-site" if-key="ranger.service.https.attrib.client.auth" if-key-state="present"/>
+        </definition>
+      </changes>
+    </component>
+    </service>
+
     <service name="HIVE">
       <component name="HIVE_SERVER">
         <changes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0cb9194f/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 2c82cb3..5f1e06c 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2.5/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -25,6 +25,7 @@
   <prerequisite-checks>
     <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
     <check>org.apache.ambari.server.checks.JavaVersionCheck</check>
+    <check>org.apache.ambari.server.checks.RangerSSLConfigCheck</check>
     <configuration>
       <!-- Configuration properties for all pre-reqs including required pre-reqs -->
       <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
@@ -117,6 +118,18 @@
           <function>prepare_express_upgrade</function>
         </task>
       </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please back up the Ranger Admin database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Backup Ranger KMS Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please back up the Ranger KMS database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
     </group>
 
     <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
@@ -140,6 +153,16 @@
         <component>NFS_GATEWAY</component>
       </service>
 
+      <service name="RANGER">
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_TAGSYNC</component>
+      </service>
+
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+
       <service name="ZOOKEEPER">
         <component>ZOOKEEPER_SERVER</component>
       </service>
@@ -187,6 +210,44 @@
         <task xsi:type="configure" id="biginsights_4_2_hbase_env_config" />
       </execute-stage>
 
+      <!--RANGER-->
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Parameterizing Ranger Admin Log4J Properties">
+        <task xsi:type="configure" id="admin_log4j_parameterize">
+          <summary>Updating the Ranger Admin Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Parameterizing Ranger Usersync Log4J Properties">
+        <task xsi:type="configure" id="usersync_log4j_parameterize">
+          <summary>Updating the Ranger Usersync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_TAGSYNC" title="Parameterizing Ranger Tagsync Log4J Properties">
+        <task xsi:type="configure" id="tagsync_log4j_parameterize">
+          <summary>Updating the Ranger Tagsync Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Apply config changes for Ranger Usersync">
+        <task xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade"/>
+      </execute-stage>
+
+      <!--RANGER-KMS-->
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS">
+        <task xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl"/>
+      </execute-stage>
+
       <!-- HIVE -->
       <execute-stage service="HIVE" component="HIVE_SERVER" title="Apply config changes for Hive Server">
         <task xsi:type="configure" id="biginsights_4_2_0_0_hive_env_configure"/>
@@ -254,6 +315,18 @@
       </service>
     </group>
 
+    <group xsi:type="restart" name="RANGER" title="Ranger">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER">
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_TAGSYNC</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -292,6 +365,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -374,6 +457,8 @@
       <skippable>true</skippable>
       <priority>
         <service>ZOOKEEPER</service>
+        <service>RANGER</service>
+        <service>RANGER_KMS</service>
         <service>HDFS</service>
         <service>KAFKA</service>
         <service>YARN</service>
@@ -567,6 +652,61 @@
       </component>
     </service>
 
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <pre-upgrade>
+
+          <task xsi:type="execute" hosts="all">
+            <script>scripts/ranger_admin.py</script>
+            <function>set_pre_start</function>
+          </task>
+
+          <task xsi:type="execute" hosts="any">
+            <summary>Upgrading Ranger database schema</summary>
+            <script>scripts/ranger_admin.py</script>
+            <function>setup_ranger_database</function>
+          </task>
+
+          <task xsi:type="configure_function" hosts="all" />
+
+          <task xsi:type="execute" hosts="any">
+            <summary>Applying Ranger java patches</summary>
+            <script>scripts/ranger_admin.py</script>
+            <function>setup_ranger_java_patches</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade copy-upgrade="true" />
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+
+      </component>
+
+      <component name="RANGER_USERSYNC">
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+
+      <component name="RANGER_TAGSYNC">
+
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="all">
+            <script>scripts/ranger_tagsync.py</script>
+            <function>configure_atlas_user_for_tagsync</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade copy-upgrade="true" />
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -605,6 +745,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>

http://git-wip-us.apache.org/repos/asf/ambari/blob/0cb9194f/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
index b46f476..070207a 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/config-upgrade.xml
@@ -18,6 +18,98 @@
 
 <upgrade-config-changes xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="upgrade-config.xsd">
   <services>
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <changes>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag">
+            <type>ranger-env</type>
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties">
+            <type>admin-properties</type>
+            <transfer operation="delete" delete-key="audit_db_name" />
+            <transfer operation="delete" delete-key="audit_db_user" />
+            <transfer operation="delete" delete-key="audit_db_password" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site">
+            <type>ranger-admin-site</type>
+            <set key="ranger.audit.source.type" value="solr"/>
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.driver" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.url" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.user" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.password" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.credential.alias" />
+            <transfer operation="delete" delete-key="ranger.jpa.audit.jdbc.dialect" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property">
+            <type>ranger-admin-site</type>
+            <transfer operation="delete" delete-key="ranger.sso.cookiename" />
+            <transfer operation="delete" delete-key="ranger.sso.query.param.originalurl" />
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag">
+            <type>ranger-env</type>
+            <set key="is_external_solrCloud_enabled" value="true"
+              if-type="ranger-env" if-key="is_solrCloud_enabled" if-value="true"/>
+          </definition>
+
+          <definition xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous">
+            <type>ranger-env</type>
+            <transfer operation="delete" delete-key="bind_anonymous" />
+          </definition>
+        </changes>
+      </component>
+
+      <component name="RANGER_USERSYNC">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade">
+            <type>ranger-ugsync-site</type>
+            <set key="ranger.usersync.ldap.deltasync" value="false"
+              if-type="ranger-ugsync-site" if-key="ranger.usersync.source.impl.class" if-value="org.apache.ranger.ldapusersync.process.LdapUserGroupBuilder"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <changes>
+          <definition xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db">
+            <type>ranger-kms-audit</type>
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.url" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.user" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.password" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.jdbc.driver" />
+            <transfer operation="delete" delete-key="xasecure.audit.credential.provider.file" />
+            <transfer operation="delete" delete-key="xasecure.audit.destination.db.batch.filespool.dir" />
+          </definition>
+          <definition xsi:type="configure" id="kms_log4j_parameterize" summary="Parameterizing Ranger KMS Log4J Properties">
+            <type>kms-log4j</type>
+            <set key="ranger_kms_log_maxfilesize" value="256"/>
+            <set key="ranger_kms_log_maxbackupindex" value="20"/>
+            <set key="ranger_kms_audit_log_maxfilesize" value="256"/>
+            <set key="ranger_kms_audit_log_maxbackupindex" value="20"/>
+            <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxFileSize = {{ranger_kms_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms.MaxBackupIndex = {{ranger_kms_log_maxbackupindex}}"/>
+            <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxFileSize = {{ranger_kms_audit_log_maxfilesize}}MB"/>
+            <replace key="content" find="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender" replace-with="log4j.appender.kms-audit=org.apache.log4j.DailyRollingFileAppender&#xA;log4j.appender.kms-audit.MaxBackupIndex = {{ranger_kms_audit_log_maxbackupindex}}"/>
+          </definition>
+          <definition xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl">
+            <type>ranger-kms-site</type>
+            <transfer operation="delete" delete-key="ranger.https.attrib.keystore.file"
+              if-type="ranger-kms-site" if-key="ranger.service.https.attrib.keystore.file" if-key-state="present"/>
+            <transfer operation="delete" delete-key="ranger.service.https.attrib.clientAuth"
+              if-type="ranger-kms-site" if-key="ranger.service.https.attrib.client.auth" if-key-state="present"/>
+          </definition>
+        </changes>
+      </component>
+    </service>
+
     <service name="HDFS">
       <component name="NAMENODE">
         <changes>
@@ -143,4 +235,6 @@
       </component>
     </service>
   </services>
+
+
 </upgrade-config-changes>

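The definitions above lean on conditional attributes: if-type/if-key/if-value gates a <set>, and if-key-state="present" gates a <transfer operation="delete">. A minimal sketch of those semantics, modeling each config type as a plain dict (the values below are illustrative):

    ranger_env = {"is_solrCloud_enabled": "true"}
    # <set key="is_external_solrCloud_enabled" value="true" if-value="true" .../>
    if ranger_env.get("is_solrCloud_enabled") == "true":
        ranger_env["is_external_solrCloud_enabled"] = "true"

    ranger_kms_site = {
        "ranger.service.https.attrib.keystore.file": "/etc/security/keystore.jks",
        "ranger.https.attrib.keystore.file": "/etc/security/keystore.jks",  # duplicate
    }
    # <transfer operation="delete" ... if-key-state="present"/>: drop the duplicate
    # key only when the canonical key exists
    if "ranger.service.https.attrib.keystore.file" in ranger_kms_site:
        ranger_kms_site.pop("ranger.https.attrib.keystore.file", None)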
http://git-wip-us.apache.org/repos/asf/ambari/blob/0cb9194f/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index b66c234..5b8f8d9 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -25,6 +25,7 @@
   <prerequisite-checks>
     <check>org.apache.ambari.server.checks.ServicesYarnWorkPreservingCheck</check>
     <check>org.apache.ambari.server.checks.JavaVersionCheck</check>
+    <check>org.apache.ambari.server.checks.RangerSSLConfigCheck</check>
     <configuration>
       <!-- Configuration properties for all pre-reqs including required pre-reqs -->
       <check-properties name="org.apache.ambari.server.checks.HiveDynamicServiceDiscoveryCheck">
@@ -117,6 +118,18 @@
           <function>prepare_express_upgrade</function>
         </task>
       </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Backup Ranger Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please back up the Ranger Admin database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Backup Ranger KMS Database">
+        <task xsi:type="manual">
+          <message>Before continuing, please back up the Ranger KMS database on the following host(s): {{hosts.all}}.</message>
+        </task>
+      </execute-stage>
     </group>
 
     <group xsi:type="stop" name="STOP_LOW_LEVEL_SERVICE_COMPONENTS" title="Stop Components for Core Services">
@@ -140,6 +153,16 @@
         <component>NFS_GATEWAY</component>
       </service>
 
+      <service name="RANGER">
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_TAGSYNC</component>
+      </service>
+
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+
       <service name="ZOOKEEPER">
         <component>ZOOKEEPER_SERVER</component>
       </service>
@@ -211,6 +234,70 @@
       <execute-stage service="OOZIE" component="OOZIE_SERVER" title="Apply config changes for Oozie server">
         <task xsi:type="configure" id="biginsights_4_2_oozie_server_update_environment_tomcat" />
       </execute-stage>
+
+      <!-- RANGER -->
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_flag"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_admin_properties"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_audit_db_ranger_admin_site"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_sso_property"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_5_0_0_set_external_solrCloud_flag"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Calculating Ranger Properties">
+        <condition xsi:type="security" type="kerberos"/>
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKerberosConfigCalculation">
+          <summary>Calculating Ranger Properties</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Configuring Ranger Alerts">
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerWebAlertConfigAction">
+          <summary>Configuring Ranger Alerts</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_ADMIN" title="Apply config changes for Ranger Admin">
+        <task xsi:type="configure" id="hdp_2_6_0_0_remove_bind_anonymous"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER" component="RANGER_USERSYNC" title="Apply config changes for Ranger Usersync">
+        <task xsi:type="configure" id="hdp_2_6_0_0_disable_delta_sync_during_upgrade"/>
+      </execute-stage>
+
+      <!-- RANGER KMS -->
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS Server">
+        <task xsi:type="configure" id="hdp_2_5_0_0_remove_ranger_kms_audit_db"/>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Parameterizing Ranger Kms Log4J Properties">
+        <task xsi:type="configure" id="kms_log4j_parameterize">
+          <summary>Updating the KMS Log4J properties to include parameterizations</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Calculating Ranger Properties">
+        <condition xsi:type="security" type="kerberos"/>
+        <task xsi:type="server_action" class="org.apache.ambari.server.serveraction.upgrades.RangerKmsProxyConfig">
+          <summary>Adding Ranger proxy user properties</summary>
+        </task>
+      </execute-stage>
+
+      <execute-stage service="RANGER_KMS" component="RANGER_KMS_SERVER" title="Apply config changes for Ranger KMS">
+        <task xsi:type="configure" id="hdp_2_6_0_0_remove_ranger_kms_duplicate_ssl"/>
+      </execute-stage>
     </group>
 
 
@@ -254,6 +341,18 @@
       </service>
     </group>
 
+    <group xsi:type="restart" name="RANGER" title="Ranger">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER">
+        <component>RANGER_ADMIN</component>
+        <component>RANGER_USERSYNC</component>
+        <component>RANGER_TAGSYNC</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="HDFS" title="HDFS">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -292,6 +391,16 @@
       </execute-stage>
     </group>
 
+    <group xsi:type="restart" name="RANGER_KMS" title="Ranger KMS">
+      <service-check>false</service-check>
+      <skippable>true</skippable>
+      <supports-auto-skip-failure>false</supports-auto-skip-failure>
+      <parallel-scheduler/>
+      <service name="RANGER_KMS">
+        <component>RANGER_KMS_SERVER</component>
+      </service>
+    </group>
+
     <group xsi:type="restart" name="KAFKA" title="Kafka">
       <service-check>false</service-check>
       <skippable>true</skippable>
@@ -374,6 +483,8 @@
       <skippable>true</skippable>
       <priority>
         <service>ZOOKEEPER</service>
+        <service>RANGER</service>
+        <service>RANGER_KMS</service>
         <service>HDFS</service>
         <service>KAFKA</service>
         <service>YARN</service>
@@ -567,6 +678,61 @@
       </component>
     </service>
 
+    <service name="RANGER">
+      <component name="RANGER_ADMIN">
+        <pre-upgrade>
+
+          <task xsi:type="execute" hosts="all">
+            <script>scripts/ranger_admin.py</script>
+            <function>set_pre_start</function>
+          </task>
+
+          <task xsi:type="execute" hosts="any">
+            <summary>Upgrading Ranger database schema</summary>
+            <script>scripts/ranger_admin.py</script>
+            <function>setup_ranger_database</function>
+          </task>
+
+          <task xsi:type="configure_function" hosts="all" />
+
+          <task xsi:type="execute" hosts="any">
+            <summary>Applying Ranger java patches</summary>
+            <script>scripts/ranger_admin.py</script>
+            <function>setup_ranger_java_patches</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade copy-upgrade="true" />
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+
+      </component>
+
+      <component name="RANGER_USERSYNC">
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+
+      <component name="RANGER_TAGSYNC">
+
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="all">
+            <script>scripts/ranger_tagsync.py</script>
+            <function>configure_atlas_user_for_tagsync</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade copy-upgrade="true" />
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="HDFS">
       <component name="NAMENODE">
         <upgrade>
@@ -605,6 +771,30 @@
       </component>
     </service>
 
+    <service name="RANGER_KMS">
+      <component name="RANGER_KMS_SERVER">
+        <pre-upgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Upgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-upgrade>
+
+        <pre-downgrade>
+          <task xsi:type="execute" hosts="any" sequential="true">
+            <summary>Downgrading Ranger KMS database schema</summary>
+            <script>scripts/kms_server.py</script>
+            <function>setup_ranger_kms_database</function>
+          </task>
+        </pre-downgrade>
+
+        <upgrade>
+          <task xsi:type="restart-task"/>
+        </upgrade>
+      </component>
+    </service>
+
     <service name="MAPREDUCE2">
       <component name="HISTORYSERVER">
         <upgrade>


[05/12] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
new file mode 100755
index 0000000..3cb7aef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/params.py
@@ -0,0 +1,128 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+
+# server configurations
+config = Script.get_config()
+tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+iop_stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
+titan_user = config['configurations']['titan-env']['titan_user']
+user_group = config['configurations']['cluster-env']['user_group']
+titan_bin_dir = '/usr/iop/current/titan-client/bin'
+
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
+smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
+
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+
+# titan configurations
+titan_conf_dir = "/usr/iop/current/titan-client/conf"
+titan_hbase_solr_props = config['configurations']['titan-hbase-solr']['content']
+titan_env_props = config['configurations']['titan-env']['content']
+log4j_console_props = config['configurations']['titan-log4j']['content']
+
+# 32-bit JDKs are not supported.
+java64_home = config['hostLevelParams']['java_home']
+hadoop_config_dir = '/etc/hadoop/conf'
+hbase_config_dir = '/etc/hbase/conf'
+
+# Titan requires 'storage.hostname', which for the HBase backend in IOP 4.2 is the
+# ZooKeeper quorum of the HBase cluster.
+storage_hosts = config['clusterHostInfo']['zookeeper_hosts']
+storage_host = ",".join(storage_hosts)
+hbase_zookeeper_parent = config['configurations']['hbase-site']['zookeeper.znode.parent']
+
+# Solr Cloud hosts
+solr_hosts = config['clusterHostInfo']['solr_hosts']
+solr_host = ",".join(solr_hosts)
+solr_server_host = solr_hosts[0]
+
+# There is no 'titan_host' entry in 'clusterHostInfo'; looking it up would fail with
+# "Configuration parameter 'titan_host' was not found in configurations dictionary!"
+# Known issue (tracked as task 118900): Titan and Solr are installed on the same node
+# for now, so the Solr server host stands in for the Titan host.
+titan_host = solr_server_host
+
+# The conf directory and jar need to be copied to the Solr site.
+titan_dir = format('/usr/iop/current/titan-client')
+titan_ext_dir = format('/usr/iop/current/titan-client/ext')
+titan_solr_conf_dir = format('/usr/iop/current/titan-client/conf/solr')
+titan_solr_jar_file = format('/usr/iop/current/titan-client/lib/jts-1.13.jar')
+
+titan_solr_hdfs_dir = "/apps/titan"
+titan_solr_hdfs_conf_dir = "/apps/titan/conf"
+titan_solr_hdfs_jar = "/apps/titan/jts-1.13.jar"
+titan_tmp_dir = format('{tmp_dir}/titan')
+titan_solr_dir = format('{titan_tmp_dir}/solr_installed')
+configuration_tags = config['configurationTags']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+titan_hdfs_mode = 0775
+
+# for create_hdfs_directory (security_enabled and kinit_path_local are already defined above)
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_site = config['configurations']['hdfs-site']
+hostname = config['hostname']  # this agent's host name, used to resolve _HOST in the principal
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', 'missing_principal').replace("_HOST", hostname)
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+# To create HDFS directories, code calls params.HdfsResource with these defaults pre-bound.
+HdfsResource = functools.partial(
+  HdfsResource,
+  user = hdfs_user,
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs
+)
+

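With the keyword defaults pre-bound by functools.partial above, callers supply only the resource-specific arguments. A hedged usage sketch of the pattern from inside another service script (the call below is illustrative, not taken from this patch):

    params.HdfsResource(params.titan_solr_hdfs_conf_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=params.titan_user,
                        mode=params.titan_hdfs_mode)
    params.HdfsResource(None, action="execute")  # flush the queued HDFS operations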
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
new file mode 100755
index 0000000..3c011a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/service_check.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import os
+from resource_management import *
+from resource_management.core.resources.system import Execute, File
+from resource_management.core.source import InlineTemplate, StaticFile
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import compare_versions
+from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.validate import call_and_match_output
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class TitanServiceCheck(Script):
+    pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class TitanServiceCheckLinux(TitanServiceCheck):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+
+        File( format("{tmp_dir}/titanSmoke.groovy"),
+              content = StaticFile("titanSmoke.groovy"),
+              mode = 0755
+              )
+
+        if params.iop_stack_version != "" and compare_versions(params.iop_stack_version, '4.2') >= 0:
+            if params.security_enabled:
+                kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
+                Execute(kinit_cmd,
+                        user=params.smokeuser
+                        )
+
+            Execute(format("gremlin {tmp_dir}/titanSmoke.groovy"),
+                    tries     = 3,
+                    try_sleep = 5,
+                    path      = format('{titan_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
+                    user      = params.smokeuser,
+                    logoutput = True
+                    )
+
+if __name__ == "__main__":
+    # print "Track service check status"
+    TitanServiceCheckLinux().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
new file mode 100755
index 0000000..fd94c82
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan.py
@@ -0,0 +1,70 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import os
+from resource_management import *
+from resource_management.core.source import InlineTemplate
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def titan():
+    import params
+
+    Directory(params.titan_conf_dir,
+               create_parents = True,
+               owner=params.titan_user,
+               group=params.user_group
+               )
+
+    File(format("{params.titan_conf_dir}/titan-env.sh"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.titan_env_props)
+             )
+
+    # titan-hbase-solr_properties is always set to a default even if it's not in the payload
+    File(format("{params.titan_conf_dir}/titan-hbase-solr.properties"),
+         mode=0644,
+         group=params.user_group,
+         owner=params.titan_user,
+         content=InlineTemplate(params.titan_hbase_solr_props)
+         )
+
+    if params.log4j_console_props is not None:
+        File(format("{params.titan_conf_dir}/log4j-console.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user,
+             content=InlineTemplate(params.log4j_console_props)
+             )
+    elif os.path.exists(format("{params.titan_conf_dir}/log4j-console.properties")):
+        File(format("{params.titan_conf_dir}/log4j-console.properties"),
+             mode=0644,
+             group=params.user_group,
+             owner=params.titan_user
+             )
+    # Change titan ext directory for multiple user access
+    Directory(params.titan_ext_dir,
+               create_parents = True,
+               owner=params.titan_user,
+               group=params.user_group,
+               mode=0775
+               )

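The File resources above render the *-env and properties payloads with InlineTemplate, which substitutes {{variable}} references from the params scope. A minimal standalone sketch of the same idea using jinja2 directly (the content string is hypothetical):

    from jinja2 import Template

    titan_env_props = "export JAVA_HOME={{ java64_home }}"  # hypothetical payload
    print(Template(titan_env_props).render(java64_home="/usr/jdk64/jdk1.8.0"))
    # -> export JAVA_HOME=/usr/jdk64/jdk1.8.0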
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
new file mode 100755
index 0000000..d54ccee
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/services/TITAN/package/scripts/titan_client.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+import os
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from titan import titan
+
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+
+class TitanClient(Script):
+    def configure(self, env):
+        import params
+        env.set_params(params)
+        titan()
+
+    def status(self, env):
+        raise ClientComponentHasNoStatus()
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class TitanClientLinux(TitanClient):
+    def get_component_name(self):
+        return "titan-client"
+
+    def pre_rolling_restart(self, env):
+        import params
+        env.set_params(params)
+
+        if params.version and compare_versions(format_stack_version(params.version), '4.2.0.0') >= 0:
+            conf_select.select(params.stack_name, "titan", params.version)
+            stack_select.select("titan-client", params.version)
+
+    def install(self, env):
+        self.install_packages(env)
+        self.configure(env)
+
+if __name__ == "__main__":
+    TitanClient().execute()

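pre_rolling_restart only repoints the conf and stack symlinks when the restart targets stack 4.2.0.0 or later. A hedged sketch of that guard in isolation (the version value is hypothetical):

    from resource_management.libraries.functions.version import (
        compare_versions, format_stack_version)

    version = "4.2.0.0"  # stand-in for /commandParams/version during an upgrade
    if version and compare_versions(format_stack_version(version), "4.2.0.0") >= 0:
        # conf_select.select(...) and stack_select.select(...) would run here
        print("repointing titan-client to", version)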
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
index 4867626..b66c234 100644
--- a/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
+++ b/ambari-server/src/main/resources/stacks/BigInsights/4.2/upgrades/nonrolling-upgrade-to-hdp-2.6.xml
@@ -107,7 +107,7 @@
       <execute-stage service="HBASE" component="HBASE_MASTER" title="Snapshot HBASE">
         <task xsi:type="execute" hosts="master">
           <script>scripts/hbase_upgrade.py</script>
-          <function>snapshot</function>
+          <function>take_snapshot</function>
         </task>
       </execute-stage>
 

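The one-line fix above matters because the <function> named in an upgrade pack must match a method that actually exists on the referenced script class. A minimal sketch of that dispatch, assuming Ambari resolves the command name to a method via attribute lookup (simplified here):

    class HbaseMasterUpgrade(object):
        def take_snapshot(self):
            print("snapshotting all HBase tables before the express upgrade")

    def run_task(script_cls, function_name):
        # <script>scripts/hbase_upgrade.py</script> + <function>take_snapshot</function>
        getattr(script_cls(), function_name)()

    run_task(HbaseMasterUpgrade, "take_snapshot")  # succeeds after the fix
    try:
        run_task(HbaseMasterUpgrade, "snapshot")   # the pre-fix mismatch
    except AttributeError as err:
        print("stage fails:", err)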

[07/12] ambari git commit: AMBARI-21462. Readd TITAN, R4ML, SYSTEMML, JNBG to BigInsights and fix HBase backup during EU and imports (alejandro)

Posted by jo...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client_params.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client_params.py
new file mode 100755
index 0000000..5dcc8e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/py_client_params.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from jkg_toree_params import py_executable, py_venv_pathprefix, py_venv_restrictive, venv_owner, ambarisudo
+import jnbg_helpers as helpers
+
+# Server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+package_dir = helpers.package_dir()
+cmd_file_name = "pythonenv_setup.sh"
+cmd_file_path = format("{package_dir}files/{cmd_file_name}")
+
+# Sequence of commands executed in py_client.py
+commands = [ambarisudo + ' ' +
+            cmd_file_path + ' ' +
+            py_executable + ' ' +
+            py_venv_pathprefix + ' ' +
+            venv_owner]

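py_client.py itself is not part of this hunk; presumably it feeds each entry of commands to Ambari's Execute resource. A hedged sketch of that consumption:

    from resource_management.core.resources.system import Execute

    for cmd in commands:  # "<ambarisudo> .../pythonenv_setup.sh <python> <prefix> <owner>"
        Execute(cmd, logoutput=True, timeout=600)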
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/service_check.py
new file mode 100755
index 0000000..d4d5f42
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/service_check.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+from resource_management.core.resources.system import Execute
+
+class JupyterKernelGatewayServiceCheck(Script):
+    def service_check(self, env):
+        import jkg_toree_params as params
+        env.set_params(params)
+
+        if params.security_enabled:
+          jnbg_kinit_cmd = format("{kinit_path_local} -kt {jnbg_kerberos_keytab} {jnbg_kerberos_principal}; ")
+          Execute(jnbg_kinit_cmd, user=params.user)
+
+        scheme = "https" if params.ui_ssl_enabled else "http"
+        Execute(format("curl -s -o /dev/null -w'%{{http_code}}' --negotiate -u: -k {scheme}://{jkg_host}:{jkg_port}/api/kernelspecs | grep 200"),
+                tries = 10,
+                try_sleep=3,
+                logoutput=True)
+        Execute(format("curl -s --negotiate -u: -k {scheme}://{jkg_host}:{jkg_port}/api/kernelspecs | grep Scala"),
+                tries = 10,
+                try_sleep=3,
+                logoutput=True)
+
+if __name__ == "__main__":
+    JupyterKernelGatewayServiceCheck().execute()

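One detail worth noting: resource_management's format() interpolates {name} references from the caller's scope, so literal braces intended for curl must be doubled. A standalone sketch of the same escaping with str.format (values are hypothetical):

    template = "curl -s -o /dev/null -w'%{{http_code}}' {scheme}://{host}:{port}/api/kernelspecs"
    print(template.format(scheme="http", host="gateway.example.com", port=8888))
    # -> curl -s -o /dev/null -w'%{http_code}' http://gateway.example.com:8888/api/kernelspecs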
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/status_params.py b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/status_params.py
new file mode 100755
index 0000000..78d7a8a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/JNBG/0.2.0/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.script.script import Script
+from resource_management.libraries.functions.format import format
+
+config = Script.get_config()
+jkg_pid_dir = config['configurations']['jnbg-env']['jkg_pid_dir_prefix']
+jkg_pid_file = format("{jkg_pid_dir}/jupyter_kernel_gateway.pid")

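A hedged sketch of how jkg_pid_file is typically consumed by the component's STATUS command (check_process_status is Ambari's stock helper; the wiring below is illustrative):

    from resource_management.libraries.functions.check_process_status import check_process_status
    import status_params

    def status(self, env):
        # raises ComponentIsNotRunning when the pid file is missing or stale
        check_process_status(status_params.jkg_pid_file)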
http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/configuration/r4ml-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/configuration/r4ml-env.xml b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/configuration/r4ml-env.xml
new file mode 100755
index 0000000..25261a3
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/configuration/r4ml-env.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="true">
+  <!-- Local/offline install R baseurl -->
+  <property>
+    <name>Baseurl for local install of R and R packages dependencies</name>
+    <description>The baseurl of the repository for R and R packages. This is only needed during local or offline install.</description>
+    <value>http://</value>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <!-- .Renviron -->
+  <property>
+    <name>Renviron</name>
+    <description>This is the jinja template for Renviron file</description>
+    <value>
+SPARK_HOME={{spark_home}}
+R4ML_SPARK_DRIVER_MEMORY={{spark_driver_memory}}
+SPARKR_SUBMIT_ARGS={{spark_submit_args}}
+R4ML_YARN_KEYTAB=
+R4ML_YARN_PRINCIPAL=
+R4ML_SYSML_JAR={{systemml_jar}}
+R_LIBS={{spark_home}}/R/lib:{{r4ml_home}}/R/lib
+    </value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/metainfo.xml b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/metainfo.xml
new file mode 100755
index 0000000..214d577
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/metainfo.xml
@@ -0,0 +1,92 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>R4ML</name>
+      <displayName>R4ML</displayName>
+      <comment>A scalable, hybrid approach to ML/Stats using R, SystemML and Apache Spark.</comment>
+      <version>0.8.0</version>
+      <components>
+        <component>
+          <name>R4ML</name>
+          <displayName>R4ML</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SPARK2/SPARK2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>SYSTEMML/SYSTEMML</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/r4ml_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+          </configFiles>          
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>r4ml_4_3_*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>SPARK2</service>
+        <service>SYSTEMML</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/Install.R
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/Install.R b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/Install.R
new file mode 100755
index 0000000..b7c9d1c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/Install.R
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+args <- commandArgs(trailingOnly = TRUE)
+options(repos=c("http://cran.rstudio.com"))
+tryCatch({
+  if (suppressWarnings(!require(args[1], character.only=T))) install.packages(args[1])
+},
+warning = function(w) {print(w); ifelse(grepl("unable to resolve", w) || grepl("non-zero exit status", w), quit(save="no", status=1), quit(save="no", status=0))},
+error = function(e) quit(save="no", status=2))
+quit(save="no", status=0)

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/ServiceCheck.R
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/ServiceCheck.R b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/ServiceCheck.R
new file mode 100755
index 0000000..c5ab359
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/ServiceCheck.R
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+tryCatch({
+  lib_loc <- file.path("/usr/iop/current/r4ml-client" , "R", "lib")
+  .libPaths(c(lib_loc, .libPaths()))
+  lib_loc <- file.path("/usr/iop/current/spark2-client" , "R", "lib")
+  .libPaths(c(lib_loc, .libPaths()))
+  library(R4ML)
+  r4ml.session()
+  r4ml.session.stop()
+}, warning = function(w) ifelse(grepl("validateTransformOptions", w), quit(save="no", status=0), quit(save="no", status=1)),
+error = function(e) {print(e); quit(save="no", status=2)})
+quit(save="no", status=0)

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/localr.repo
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/localr.repo b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/localr.repo
new file mode 100755
index 0000000..d9117db
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/files/localr.repo
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[RREPO]
+name=RREPO
+baseurl=URLXXXX
+enabled=1
+gpgcheck=0
\ No newline at end of file
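
URLXXXX above is a placeholder: r4ml_client.py (further below) rewrites it
with sed after escaping '/' and '$' in the configured baseurl. A minimal
Python sketch of that escaping, using a hypothetical repository URL:

    import re
    baseurl = "http://repos.example.com/RPMS"  # hypothetical local repo URL
    escaped = re.sub(r"\$", r"\$", re.sub("/", r"\/", baseurl))
    # yields http:\/\/repos.example.com\/RPMS, safe to splice into the
    # sed expression s/URLXXXX/<escaped>/g
    print(escaped)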

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/params.py
new file mode 100755
index 0000000..f23c3b1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/params.py
@@ -0,0 +1,80 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.libraries.script.script import Script
+import os
+
+# temp directory
+exec_tmp_dir = Script.get_tmp_dir()
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+r4ml_home = format("{stack_root}/current/r4ml-client")
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+
+java_home = config['hostLevelParams']['java_home']
+r4ml_conf_dir = "/etc/r4ml/conf"
+if stack_version and check_stack_feature(StackFeature.ROLLING_UPGRADE, stack_version):
+  r4ml_conf_dir = format("{stack_root}/current/r4ml-client/conf")
+
+# environment variables
+spark_home = os.path.join(stack_root, "current", 'spark2-client')
+spark_driver_memory = "4G"
+spark_submit_args = "--num-executors 4 sparkr-shell"
+r4ml_auto_start = 0
+Renviron_template = config['configurations']['r4ml-env']['Renviron']
+
+# rpm links
+epel = ""
+centos = ""
+if System.get_instance().os_family == "redhat" :
+  if System.get_instance().os_major_version == "7" :
+    epel = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
+    if System.get_instance().machine == "x86_64" :
+      centos = "http://mirror.centos.org/centos/7/os/x86_64/Packages/"
+  else :
+    epel = "https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm"
+    if System.get_instance().machine == "x86_64" :
+      centos = "http://mirror.centos.org/centos/6/os/x86_64/Packages/"
+
+# local R and R packages baseurl
+baseurl = config['configurations']['r4ml-env']['Baseurl for local install of R and R packages dependencies']
+rrepo = "/etc/yum.repos.d/localr.repo"
+
+# systemml jar path
+systemml_jar = os.path.join(stack_root, "current", "systemml-client", "lib", "systemml.jar")
+if not os.path.isfile(systemml_jar) or not os.access(systemml_jar, os.R_OK) :
+  systemml_jar = ""
+
+smokeuser = config['configurations']['cluster-env']['smokeuser']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/r4ml_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/r4ml_client.py b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/r4ml_client.py
new file mode 100755
index 0000000..3dbce5c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/r4ml_client.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import subprocess
+from resource_management import *
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.core.logger import Logger
+
+class R4MLClient(Script):
+
+  def configure(self, env, config_dir=None):
+    import params
+    env.set_params(params)
+
+  def get_component_name(self):
+    return "r4ml-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      Logger.info("Executing R4ML Client Stack Upgrade pre-restart")
+      conf_select.select(params.stack_name, "r4ml", params.version)
+      stack_select.select("r4ml-client", params.version)
+
+  def stack_upgrade_save_new_config(self, env):
+    import params
+    env.set_params(params)
+
+    conf_select_name = "r4ml"
+    base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+    config_dir = self.get_config_dir_during_stack_upgrade(env, base_dir, conf_select_name)
+
+    if config_dir:
+      Logger.info("stack_upgrade_save_new_config(): Calling conf-select on %s using version %s" % (conf_select_name, str(params.version)))
+
+      # Because this script was called from ru_execute_tasks.py which already enters an Environment with its own basedir,
+      # must change it now so this function can find the Jinja Templates for the service.
+      env.config.basedir = base_dir
+      conf_select.select(params.stack_name, conf_select_name, params.version)
+      self.configure(env, config_dir=config_dir)
+
+  def checkPackage(self, packages):
+    try :
+      checked_call("sudo yum list " + packages)
+    except Exception as e:
+      # ignore
+      print e
+      return 1
+    return 0
+
+  def setupEpelRepo(self, params):
+    epel_installed = False
+    import urllib
+    code = 0
+    try :
+      code = subprocess.call(["sudo", "which", "R"])
+    except Exception as e :
+      Logger.error(str(e))
+    if code != 0 :
+      # try to set up R repo
+      code = self.checkPackage("R")
+      if code != 0 :
+        # R does not exist in any repo
+        code = self.checkPackage("epel-release")
+        if code != 0 :
+          if params.epel != "" :
+            # proceed to install EPEL
+            try :
+              urllib.urlretrieve(params.epel, "/tmp/epel.rpm")
+              Execute(("yum", "install", "/tmp/epel.rpm", "-y"), sudo=True)
+              epel_installed = True
+            except Exception as e :
+              Logger.error(str(e))
+              # it is ok to fail to download as it can be an offline install case
+        else :
+          Execute(("yum", "install", "epel-release", "-y"), sudo=True)
+          epel_installed = True
+
+      # check another two dependencies
+      code = self.checkPackage("texinfo-tex texlive-epsf")
+      if code != 0 :
+        # download from centos mirror
+        if params.centos != "" :
+          try :
+            import re
+            urllib.urlretrieve(params.centos, "/tmp/index")
+            s = open("/tmp/index", "r").read()
+            tex = re.search('texinfo-tex(.+)rpm(?=\")', s).group(0)
+            epsf = re.search('texlive-epsf-svn(.+)rpm(?=\")', s).group(0)
+            urllib.urlretrieve(params.centos + tex, "/tmp/tex.rpm")
+            urllib.urlretrieve(params.centos + epsf, "/tmp/epsf.rpm")
+            Execute(("yum", "install", "/tmp/epsf.rpm", "/tmp/tex.rpm", "-y"), sudo=True)
+          except Exception as e :
+            Logger.error(str(e))
+        else :
+          Logger.error("Dependent packages texinfo-tex and texlive-epsf are not found in any repos. Enable RedHat Optional Packages repo or install these two packages manually before retry.")
+          exit(1)
+      # install R now
+      Execute(("yum", "install", "R", "-y"), sudo=True)
+    return epel_installed
+
+  def setupRrepo(self, params):
+    import re
+    if params.baseurl != "http://" :
+      # assume this is a local install
+      File(format(params.rrepo),
+           action="delete")
+
+      File(format(params.rrepo),
+           content = StaticFile("localr.repo"),
+           mode = 0644)
+      Execute(("sed", "-i", "s/URLXXXX/" + re.sub('\$', '\$', re.sub('/', '\/', params.baseurl)) + "/g ", params.rrepo),
+              sudo=True)
+      Logger.info("Local install R from %s." %params.baseurl)
+      # install R now
+      Execute(("yum", "install", "R", "-y"), sudo=True)
+      return False
+    else :
+      return self.setupEpelRepo(params)
+
+  def install(self, env):
+    import params
+    env.set_params(params)
+
+    # set up R repo
+    epel_installed = self.setupRrepo(params)
+
+    # install R and R4ML
+    self.install_packages(env)
+
+    # remove epel-release repo installed above as R has been installed
+    if epel_installed :
+      Execute(("yum", "remove", "epel-release", "-y"), sudo=True)
+    else :
+      if (os.path.exists(params.rrepo)) :
+        File(format(params.rrepo),
+             action="delete")
+
+    # install several R packages that will be used by R4ML functions
+    installR = params.exec_tmp_dir + "/Install.R"
+    File(format(installR),
+         content = StaticFile("Install.R"),
+         mode = 0755)
+
+    if (params.baseurl != "http://"):
+      import re
+      Execute(("sed", "-i", "s/repos=c(.*/repos=c(\"" + re.sub('\$', '\$', re.sub('/', '\/', params.baseurl)) + "\"))/g", installR), sudo=True)
+
+    # install the dependent packages
+    packages = ["R6", "uuid", "survival"]
+    for pkg in packages :
+      Execute(("Rscript", installR, pkg), sudo=True)
+
+    # set up configuration file
+    Directory(params.r4ml_conf_dir,
+              create_parents=True,
+              action="create",
+              mode=0755)
+
+    File(format("{r4ml_conf_dir}/Renviron"),
+         mode=0755,
+         content = InlineTemplate(params.Renviron_template))
+
+    # install R4ML package to /usr/iop/current/r4ml-client/R/lib directory
+    Directory(format(params.r4ml_home + "/R/lib"),
+              action="create",
+              create_parents=True,
+              mode=0755)
+
+    checked_call(format("sudo R_LIBS={spark_home}/R/lib R CMD INSTALL --install-tests --library={r4ml_home}/R/lib {r4ml_home}/R4ML_*.tar.gz"))
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  R4MLClient().execute()
+
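
For reference, a hedged sketch of the shell command behind the final
checked_call() in install() above, with stack_root assumed to be /usr/iop
(the actual paths come from params.spark_home and params.r4ml_home):

    import subprocess
    # shell=True so the R4ML_*.tar.gz glob is expanded by the shell.
    subprocess.check_call(
        "sudo R_LIBS=/usr/iop/current/spark2-client/R/lib "
        "R CMD INSTALL --install-tests "
        "--library=/usr/iop/current/r4ml-client/R/lib "
        "/usr/iop/current/r4ml-client/R4ML_*.tar.gz",
        shell=True)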

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/service_check.py
new file mode 100755
index 0000000..2acb4d2
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/R4ML/0.8.0/package/scripts/service_check.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+import subprocess
+import os
+
+class R4MLServiceCheck(Script):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+
+        # generate the service check file
+        scR = os.path.join(params.exec_tmp_dir, "ServiceCheck.R")
+        File(format(scR),
+             content = StaticFile("ServiceCheck.R"),
+             mode = 0755)
+
+        Execute(("Rscript", scR),
+                tries=120,
+                try_sleep=20,
+                path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+                logoutput=True,
+                user=params.smokeuser)
+
+if __name__ == "__main__":
+    R4MLServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/metainfo.xml b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/metainfo.xml
new file mode 100755
index 0000000..ecd503f
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/metainfo.xml
@@ -0,0 +1,77 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SYSTEMML</name>
+      <displayName>SystemML</displayName>
+      <comment>Apache SystemML is a distributed and declarative machine learning platform.</comment>
+      <version>0.10.0</version>
+      <components>
+        <component>
+          <name>SYSTEMML</name>
+          <displayName>SystemML</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/systemml_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>apache_systemml*</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>
+        <service>HDFS</service>
+      </requiredServices>
+
+      <configuration-dependencies>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/__init__.py b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/__init__.py
new file mode 100755
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/params.py b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/params.py
new file mode 100755
index 0000000..dd7e46c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/params.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.format import format
+from resource_management.libraries.functions.version import format_stack_version
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.script.script import Script
+
+# server configurations
+config = Script.get_config()
+stack_root = Script.get_stack_root()
+
+systemml_home_dir = format("{stack_root}/current/systemml-client")
+systemml_lib_dir = format("{systemml_home_dir}/lib")
+systemml_scripts_dir = format("{systemml_home_dir}/scripts")
+
+stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
+stack_version = format_stack_version(stack_version_unformatted)
+
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+stack_name = default("/hostLevelParams/stack_name", None)
+
+java_home = config['hostLevelParams']['java_home']

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/service_check.py
new file mode 100755
index 0000000..c15b907
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/service_check.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.libraries.functions.format import format
+import subprocess
+import os
+
+class SystemMLServiceCheck(Script):
+    def service_check(self, env):
+        import params
+        env.set_params(params)
+        
+        if os.path.exists(params.systemml_lib_dir):
+            cp = format("{params.stack_root}/current/hadoop-client/*:{params.stack_root}/current/hadoop-mapreduce-client/*:{params.stack_root}/current/hadoop-client/lib/*:{params.systemml_lib_dir}/systemml.jar")
+            java = format("{params.java_home}/bin/java")
+            command = [java, "-cp", cp, "org.apache.sysml.api.DMLScript", "-s", "print('Apache SystemML');"]
+            process = subprocess.Popen(command, stdout=subprocess.PIPE)
+            output = process.communicate()[0]
+            print output
+        
+            if 'Apache SystemML' not in output:
+                raise Fail("Expected output Apache SystemML not found.")
+
+if __name__ == "__main__":
+    SystemMLServiceCheck().execute()
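
A manual equivalent of the check above, as a hedged sketch (classpath
shortened; paths assume the /usr/iop stack root used elsewhere in this
patch):

    import subprocess
    cp = ("/usr/iop/current/hadoop-client/*:"
          "/usr/iop/current/systemml-client/lib/systemml.jar")
    out = subprocess.check_output(
        ["java", "-cp", cp, "org.apache.sysml.api.DMLScript",
         "-s", "print('Apache SystemML');"])
    assert "Apache SystemML" in out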

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/systemml_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/systemml_client.py b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/systemml_client.py
new file mode 100755
index 0000000..2d45b68
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/SYSTEMML/0.10.0/package/scripts/systemml_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+#from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import stack_select
+from resource_management.libraries.functions import StackFeature
+from resource_management.libraries.functions.stack_features import check_stack_feature
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+
+
+class SystemMLClient(Script):
+
+  def get_component_name(self):
+    return "systemml-client"
+
+  def pre_upgrade_restart(self, env, upgrade_type=None):
+    import params
+
+    env.set_params(params)
+    if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
+      #conf_select.select(params.stack_name, "systemml", params.version)
+      stack_select.select("systemml-client", params.version)
+
+  def install(self, env):
+    self.install_packages(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  SystemMLClient().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/alerts.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/alerts.json b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/alerts.json
new file mode 100755
index 0000000..5afab9c
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/alerts.json
@@ -0,0 +1,33 @@
+{
+  "TITAN": {
+    "TITAN_SERVER": [
+      {
+        "name": "titan",
+        "label": "titan server",
+        "description": "This host-level alert is triggered if the Titan Server Instance is unreachable.",
+        "interval": 1,
+        "scope": "ANY",
+        "source": {
+          "type": "SCRIPT",
+          "path": "TITAN/1.0.0/package/alerts/alert_check_titan_server.py",
+          "parameters": [
+            {
+              "name": "titan.run.dir",
+              "display_name": "Run Directory",
+              "value": "/var/run/titan",
+              "type": "STRING",
+              "description": "The directory where the Titan Server process places its PID file."
+            },
+            {
+              "name": "titan.user",
+              "display_name": "Titan User",
+              "value": "titan",
+              "type": "STRING",
+              "description": "The user who starts the Titan Server process."
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
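
The alert above points at alert_check_titan_server.py, which is not shown
here. A minimal sketch of how such a SCRIPT alert could consume the two
parameters, assuming the standard Ambari alert entry points and a
hypothetical PID file name:

    import os

    def get_tokens():
      return ()

    def execute(configurations={}, parameters={}, host_name=None):
      run_dir = parameters.get('titan.run.dir', '/var/run/titan')
      pid_file = os.path.join(run_dir, 'titan.pid')  # hypothetical name
      if not os.path.isfile(pid_file):
        return ('CRITICAL', ['Titan Server PID file not found in ' + run_dir])
      return ('OK', ['Titan Server is running'])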

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/gremlin-server.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/gremlin-server.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/gremlin-server.xml
new file mode 100755
index 0000000..1b33e6a
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/gremlin-server.xml
@@ -0,0 +1,85 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+  <property>
+    <name>content</name>
+    <description>Configuration for Titan Server</description>
+    <value>
+host: {{titan_host}}
+port: {{titan_server_port}}
+threadPoolWorker: 1
+gremlinPool: 8
+scriptEvaluationTimeout: 300000
+serializedResponseTimeout: 300000
+channelizer: org.apache.tinkerpop.gremlin.server.channel.HttpChannelizer
+graphs: {
+  graph: conf/titan-hbase-solr.properties,
+  graphSpark: conf/hadoop-graph/hadoop-gryo.properties}
+plugins:
+  - aurelius.titan
+  - tinkerpop.spark
+  - tinkerpop.hadoop
+  - tinkerpop.tinkergraph
+scriptEngines: {
+  gremlin-groovy: {
+    imports: [java.lang.Math],
+    staticImports: [java.lang.Math.PI],
+    scripts: [scripts/empty-sample.groovy]},
+  nashorn: {
+      imports: [java.lang.Math],
+      staticImports: [java.lang.Math.PI]}}
+serializers:
+  - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { useMapperFromGraph: graph }}
+  - { className: org.apache.tinkerpop.gremlin.driver.ser.GryoMessageSerializerV1d0, config: { serializeResultToString: true }}
+  - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV1d0, config: { useMapperFromGraph: graph }}
+  - { className: org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV1d0, config: { useMapperFromGraph: graph }}
+processors:
+  - { className: org.apache.tinkerpop.gremlin.server.op.session.SessionOpProcessor, config: { sessionTimeout: 28800000 }}
+metrics: {
+  consoleReporter: {enabled: true, interval: 180000},
+  csvReporter: {enabled: true, interval: 180000, fileName: /tmp/gremlin-server-metrics.csv},
+  jmxReporter: {enabled: true},
+  slf4jReporter: {enabled: true, interval: 180000},
+  gangliaReporter: {enabled: false, interval: 180000, addressingMode: MULTICAST},
+  graphiteReporter: {enabled: false, interval: 180000}}
+threadPoolBoss: 1
+maxInitialLineLength: 4096
+maxHeaderSize: 8192
+maxChunkSize: 8192
+maxContentLength: 65536
+maxAccumulationBufferComponents: 1024
+resultIterationBatchSize: 64
+writeBufferLowWaterMark: 32768
+writeBufferHighWaterMark: 65536
+ssl: {
+  enabled: {{titan_server_ssl}}{{titan_server_ssl_key_file}}{{titan_server_ssl_key_password}}{{titan_server_ssl_key_cert_file}}
+}
+{{titan_server_simple_authenticator}}
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-gryo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-gryo.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-gryo.xml
new file mode 100755
index 0000000..677fa2d
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-gryo.xml
@@ -0,0 +1,94 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+  <property>
+    <name>content</name>
+    <description>Configuration of hadoop-gryo.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+gremlin.graph=org.apache.tinkerpop.gremlin.hadoop.structure.HadoopGraph
+gremlin.hadoop.graphInputFormat=org.apache.tinkerpop.gremlin.hadoop.structure.io.gryo.GryoInputFormat
+gremlin.hadoop.graphOutputFormat=org.apache.tinkerpop.gremlin.hadoop.structure.io.gryo.GryoOutputFormat
+gremlin.hadoop.jarsInDistributedCache=true
+
+gremlin.hadoop.inputLocation=data/tinkerpop-modern.kryo
+gremlin.hadoop.outputLocation=output
+
+#####################################
+# GiraphGraphComputer Configuration #
+#####################################
+giraph.minWorkers=2
+giraph.maxWorkers=2
+giraph.useOutOfCoreGraph=true
+giraph.useOutOfCoreMessages=true
+mapred.map.child.java.opts=-Xmx1024m
+mapred.reduce.child.java.opts=-Xmx1024m
+giraph.numInputThreads=4
+giraph.numComputeThreads=4
+# giraph.maxPartitionsInMemory=1
+# giraph.userPartitionCount=2
+
+####################################
+# SparkGraphComputer Configuration #
+####################################
+spark.master=yarn
+spark.submit.deployMode=client
+spark.yarn.jars={{default_fs}}/user/spark/share/lib/spark/*.jar
+
+# the Spark YARN ApplicationManager needs this to resolve classpath it sends to the executors
+spark.yarn.appMasterEnv.JAVA_HOME={{java64_home}}
+spark.yarn.appMasterEnv.HADOOP_CONF_DIR={{hadoop_config_dir}}
+spark.yarn.appMasterEnv.SPARK_CONF_DIR={{spark_config_dir}}
+spark.yarn.am.extraJavaOptions=-Diop.version={{full_stack_version}} -Djava.library.path={{hadoop_lib_native_dir}}
+
+# the Spark Executors (on the work nodes) needs this to resolve classpath to run Spark tasks
+spark.executorEnv.JAVA_HOME={{java64_home}}
+spark.executorEnv.HADOOP_CONF_DIR={{hadoop_config_dir}}
+spark.executorEnv.SPARK_CONF_DIR={{spark_config_dir}}
+spark.executor.memory=2g
+spark.executor.extraClassPath={{hbase_config_dir}}
+spark.serializer=org.apache.spark.serializer.KryoSerializer
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-hbase-read.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-hbase-read.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-hbase-read.xml
new file mode 100755
index 0000000..10b1d99
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/hadoop-hbase-read.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+  <property>
+    <name>content</name>
+    <description>Configuration of hadoop-hbase-read.properties</description>
+    <value>
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+gremlin.graph=org.apache.tinkerpop.gremlin.hadoop.structure.HadoopGraph
+gremlin.hadoop.graphInputFormat=com.thinkaurelius.titan.hadoop.formats.hbase.HBaseInputFormat
+gremlin.hadoop.graphOutputFormat=org.apache.hadoop.mapreduce.lib.output.NullOutputFormat
+gremlin.hadoop.jarsInDistributedCache=true
+gremlin.hadoop.deriveMemory=false
+gremlin.hadoop.memoryOutputFormat=org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat
+
+gremlin.hadoop.inputLocation=none
+gremlin.hadoop.outputLocation=output
+
+titanmr.ioformat.conf.storage.backend=hbase
+titanmr.ioformat.conf.storage.hbase.table=titan_solr
+titanmr.ioformat.conf.storage.hostname={{storage_host}}
+zookeeper.znode.parent={{hbase_zookeeper_parent}}
+
+#####################################
+# GiraphGraphComputer Configuration #
+#####################################
+giraph.minWorkers=2
+giraph.maxWorkers=2
+giraph.useOutOfCoreGraph=true
+giraph.useOutOfCoreMessages=true
+mapred.map.child.java.opts=-Xmx1024m
+mapred.reduce.child.java.opts=-Xmx1024m
+giraph.numInputThreads=4
+giraph.numComputeThreads=4
+# giraph.maxPartitionsInMemory=1
+# giraph.userPartitionCount=2
+
+####################################
+# SparkGraphComputer Configuration #
+####################################
+spark.master=yarn
+spark.submit.deployMode=client
+spark.yarn.jars={{default_fs}}/user/spark/share/lib/spark/*.jar
+
+# the Spark YARN ApplicationManager needs this to resolve classpath it sends to the executors
+spark.yarn.appMasterEnv.JAVA_HOME={{java64_home}}
+spark.yarn.appMasterEnv.HADOOP_CONF_DIR={{hadoop_config_dir}}
+spark.yarn.appMasterEnv.SPARK_CONF_DIR={{spark_config_dir}}
+spark.yarn.am.extraJavaOptions=-Diop.version={{full_stack_version}} -Djava.library.path={{hadoop_lib_native_dir}}
+
+# the Spark Executors (on the work nodes) needs this to resolve classpath to run Spark tasks
+spark.executorEnv.JAVA_HOME={{java64_home}}
+spark.executorEnv.HADOOP_CONF_DIR={{hadoop_config_dir}}
+spark.executorEnv.SPARK_CONF_DIR={{spark_config_dir}}
+spark.executor.memory=2g
+spark.executor.extraClassPath={{hbase_config_dir}}
+
+spark.serializer=org.apache.spark.serializer.KryoSerializer
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-env.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-env.xml
new file mode 100755
index 0000000..9611764
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-env.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>titan_user</name>
+    <display-name>Titan User</display-name>
+    <description>User to run Titan as</description>
+    <property-type>USER</property-type>
+    <value>titan</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>titan_pid_dir</name>
+    <value>/var/run/titan</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>titan_log_dir</name>
+    <value>/var/log/titan</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>titan_server_port</name>
+    <value>8182</value>
+    <description>Sets the port Titan Server binds to; the default is 8182.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+
+  <property>
+    <name>titan_hdfs_home_dir</name>
+    <value>/apps/titan/data</value>
+    <description>A root location in HDFS for Titan to write collection data to. Rather than specifying separate HDFS locations for the data directory and the update log directory, use this to specify one root location and have everything created automatically within it.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>SimpleAuthenticator</name>
+    <value>false</value>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <description>Set whether to enable SimpleAuthenticator (PLAIN SASL). The credentials database is located at /usr/iop/current/titan-client/data/credentials.kryo. Note: this option will not take effect if Knox is installed.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ssl.enabled</name>
+    <value>false</value>
+    <value-attributes>
+      <type>boolean</type>
+    </value-attributes>
+    <description>Set whether to enable SSL.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ssl.keyCertChainFile</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>The X.509 certificate chain file in PEM format. If this value is not present and ssl.enabled is true, a self-signed certificate will be used (not suitable for production).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ssl.keyFile</name>
+    <value/>
+    <value-attributes>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>The PKCS#8 private key file in PEM format. If this value is not present and ssl.enabled is true, a self-signed certificate will be used (not suitable for production).</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>ssl.keyPassword</name>
+    <value/>
+    <property-type>PASSWORD</property-type>
+    <value-attributes>
+      <type>password</type>
+      <empty-value-valid>true</empty-value-valid>
+    </value-attributes>
+    <description>The password of the keyFile if it is password-protected.</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>content</name>
+    <display-name>titan-env template</display-name>
+    <description>This is the template for titan-env.sh file</description>
+    <on-ambari-upgrade add="false"/>
+    <value>
+# Set JAVA HOME
+export JAVA_HOME={{java64_home}}
+
+# Add hadoop configuration directory into classpath
+export HADOOP_CONF_DIR={{hadoop_config_dir}}
+
+
+# Setup the environment for SparkGraphComputer
+# Add yarn and spark lib and config into classpath for SparkGraphComputer.
+export YARN_HOME={{yarn_home_dir}}
+export YARN_CONF_DIR=$HADOOP_CONF_DIR
+export SPARK_HOME={{spark_home_dir}}
+export SPARK_CONF_DIR={{spark_config_dir}}
+export TITAN_HOME={{titan_home_dir}}
+export CLASSPATH=$CLASSPATH:$HADOOP_CONF_DIR:$SPARK_CONF_DIR:$TITAN_HOME/conf
+
+#add hbase configuration directory into classpath
+if ([ -d "{{hbase_config_dir}}" ]); then
+   export HBASE_CONF_DIR={{hbase_config_dir}}
+   export CLASSPATH=$CLASSPATH:$HBASE_CONF_DIR
+fi
+
+if ([[ ! -d "{{titan_ext_spark_plugin_dir}}" ]] &amp;&amp; [[ -d "$SPARK_HOME/jars" ]]); then
+  for jar in $SPARK_HOME/jars/*.jar; do
+    if ([[ $jar != */guava*.jar ]] &amp;&amp; [[ $jar != */slf4j-log4j12*.jar ]] &amp;&amp; [[ $jar != */spark-core*.jar ]]) ;then
+      CLASSPATH=$CLASSPATH:$jar
+    fi
+  done
+fi
+
+export CLASSPATH
+
+# Add iop.version and the native library path to the JVM options for the Hadoop config.
+export IOP_JAVA_OPTIONS="$JAVA_OPTIONS -D{{platform_name}}.version={{full_stack_version}} -Djava.library.path={{hadoop_lib_native_dir}}"
+
+{% if security_enabled -%}
+export JVMFLAGS="-Djava.security.auth.login.config={{titan_solr_client_jaas_file}}"
+export IOP_JAVA_OPTIONS="$IOP_JAVA_OPTIONS $JVMFLAGS"
+{% endif %}
+
+source "$HADOOP_CONF_DIR"/hadoop-env.sh
+export HADOOP_GREMLIN_LIBS=$TITAN_HOME/lib
+export TITAN_LOGDIR={{titan_log_dir}}
+    </value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-hbase-solr.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-hbase-solr.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-hbase-solr.xml
new file mode 100755
index 0000000..49e3f80
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-hbase-solr.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false" supports_adding_forbidden="true">
+
+  <property>
+    <name>content</name>
+    <description>Describe the configurations for Solr</description>
+    <on-ambari-upgrade add="false"/>
+    <value># Titan configuration sample: HBase and Solr
+# ATTENTION: If you would like to use this property, manually execute titan-solr-connection.sh before building the index.
+
+# This file connects to HBase using a Zookeeper quorum
+# (storage.hostname) consisting solely of localhost. It also
+# connects to Solr running on localhost using Solr's HTTP API.
+# Zookeeper, the HBase services, and Solr must already be
+# running and available before starting Titan with this file.
+gremlin.graph=com.thinkaurelius.titan.core.TitanFactory
+storage.backend=hbase
+storage.hostname={{storage_host}}
+storage.hbase.table=titan_solr
+storage.hbase.ext.zookeeper.znode.parent={{hbase_zookeeper_parent}}
+
+cache.db-cache = true
+cache.db-cache-clean-wait = 20
+cache.db-cache-time = 180000
+cache.db-cache-size = 0.5
+
+# The indexing backend used to extend and optimize Titan's query
+# functionality. This setting is optional. Titan can use multiple
+# heterogeneous index backends. Hence, this option can appear more than
+# once, so long as the user-defined name between "index" and "backend" is
+# unique among appearances. Similar to the storage backend, this should be
+# set to one of Titan's built-in shorthand names for its standard index
+# backends (shorthands: lucene, elasticsearch, es, solr) or to the full
+# package and classname of a custom/third-party IndexProvider
+# implementation.
+
+index.search.backend=solr
+index.search.solr.mode=cloud
+index.search.solr.zookeeper-url={{zookeeper_solr_for_titan_hostname}}
+index.search.solr.configset=titan
+{{titan_solr_client_jaas_config}}
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-log4j.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-log4j.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-log4j.xml
new file mode 100755
index 0000000..c32a9e4
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/configuration/titan-log4j.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="false">
+
+  <property>
+    <name>content</name>
+    <description>Custom log4j-console.properties</description>
+    <on-ambari-upgrade add="false"/>
+    <value>
+      # Used by gremlin.sh
+
+      log4j.appender.A2=org.apache.log4j.ConsoleAppender
+      log4j.appender.A2.Threshold=TRACE
+      log4j.appender.A2.layout=org.apache.log4j.PatternLayout
+      log4j.appender.A2.layout.ConversionPattern=%d{HH:mm:ss} %-5p %c %x - %m%n
+      log4j.rootLogger=${gremlin.log4j.level}, A2
+
+      #log4j.logger.com.thinkaurelius.titan.graphdb.database.idassigner.placement=DEBUG
+      #log4j.logger.com.thinkaurelius.titan.diskstorage.hbase.HBaseStoreManager=DEBUG
+
+      # Disable spurious Hadoop config deprecation warnings under 2.2.0.
+      #
+      # See https://issues.apache.org/jira/browse/HADOOP-10178
+      #
+      # This can and should be deleted when we upgrade our Hadoop 2.2.0
+      # dependency to 2.3.0 or 3.0.0.
+      log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=OFF
+
+      # Configure MR at its own loglevel. We usually want MR at INFO,
+      # even if the rest of the loggers are at WARN or ERROR or FATAL,
+      # because job progress information is at INFO.
+      log4j.logger.org.apache.hadoop.mapred=${gremlin.mr.log4j.level}
+      log4j.logger.org.apache.hadoop.mapreduce=${gremlin.mr.log4j.level}
+
+      # This generates 3 INFO lines per jar on the classpath -- usually more
+      # noise than desirable in the REPL. Switching it to the default
+      # log4j level means it will be at WARN by default, which is ideal.
+      log4j.logger.org.apache.hadoop.mapred.LocalDistributedCacheManager=${gremlin.log4j.level}
+    </value>
+    <value-attributes>
+      <show-property-name>false</show-property-name>
+    </value-attributes>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/kerberos.json b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/kerberos.json
new file mode 100755
index 0000000..a25382e
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/kerberos.json
@@ -0,0 +1,52 @@
+{
+  "services": [
+    {
+      "name": "TITAN",
+      "components": [
+        {
+          "name": "TITAN_SERVER",
+          "identities": [
+            {
+              "name": "titan_principal",
+              "principal": {
+                "value": "${titan-env/titan_user}/_HOST@${realm}",
+                "type" : "service",
+                "configuration": "titan-env/titan_principal_name",
+                "local_username": "${titan-env/titan_user}"
+
+              },
+              "keytab": {
+                "file": "${keytab_dir}/titan.service.keytab",
+                "owner": {
+                  "name": "${titan-env/titan_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "titan-env/titan_keytab_path"
+              }
+            }
+          ]
+        },
+        {
+          "name": "TITAN_CLIENT",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            }
+          ]
+        }
+      ],
+      "configurations": [
+        {
+          "core-site": {
+            "hadoop.proxyuser.titan.groups": "${hadoop-env/proxyuser_group}",
+            "hadoop.proxyuser.titan.hosts": "*"
+          }
+        }
+      ]
+    }
+  ]
+}
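
The ${config-type/property} references in this descriptor are resolved
against cluster configuration when the service is kerberized, so the same
file works for any titan_user or user_group. A toy resolver for just that
variable syntax (illustrative only; Ambari's real processing also handles
built-ins such as ${realm} and ${keytab_dir} and transformation functions):

# Toy resolver for ${config-type/property} references; values are hypothetical.
import re

configs = {
    "titan-env/titan_user": "titan",
    "cluster-env/user_group": "hadoop",
    "realm": "EXAMPLE.COM",
}

def resolve(value, configs):
    # Replace each ${...} with its configured value, leaving unknowns intact.
    return re.sub(r"\$\{([^}]+)\}",
                  lambda m: configs.get(m.group(1), m.group(0)), value)

print(resolve("${titan-env/titan_user}/_HOST@${realm}", configs))
# -> titan/_HOST@EXAMPLE.COM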

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/metainfo.xml b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/metainfo.xml
new file mode 100755
index 0000000..75696c1
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/metainfo.xml
@@ -0,0 +1,124 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>TITAN</name>
+      <displayName>Titan</displayName>
+      <comment>Titan is a scalable graph database optimized for storing and querying graphs containing hundreds of
+        billions of vertices and edges distributed across a multi-machine cluster.</comment>
+      <version>1.0.0</version>
+      <components>
+        <component>
+          <name>TITAN_SERVER</name>
+          <displayName>Titan Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/titan_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>yaml</type>
+              <fileName>gremlin-server.yaml</fileName>
+              <dictionaryName>gremlin-server</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+        <component>
+          <name>TITAN_CLIENT</name>
+          <displayName>Titan Client</displayName>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/titan_client.py</script>
+            <scriptType>PYTHON</scriptType>
+	        <timeout>600</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+               <type>env</type>
+               <fileName>titan-env.sh</fileName>
+               <dictionaryName>titan-env</dictionaryName>
+            </configFile>
+            <configFile>
+                <type>env</type>
+                <fileName>log4j-console.properties</fileName>
+                <dictionaryName>titan-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+                <type>env</type>
+                <fileName>titan-hbase-solr.properties</fileName>
+                <dictionaryName>titan-hbase-solr</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-gryo.properties</fileName>
+              <dictionaryName>hadoop-gryo</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-hbase-read.properties</fileName>
+              <dictionaryName>hadoop-hbase-read</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>titan</name>
+            </package>
+	        <package>
+	          <name>ambari-infra-solr-client-*</name>
+	        </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <requiredServices>		
+        <service>HDFS</service>
+        <service>HBASE</service>
+        <service>SOLR</service>
+        <service>SPARK2</service>
+      </requiredServices>
+      
+      <configuration-dependencies>
+	<config-type>gremlin-server</config-type>
+        <config-type>titan-env</config-type>
+        <config-type>titan-hbase-solr</config-type>
+        <config-type>titan-log4j</config-type>
+        <config-type>hadoop-gryo</config-type>
+        <config-type>hadoop-hbase-read</config-type>
+	<config-type>knox-env</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/alerts/alert_check_titan_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/alerts/alert_check_titan_server.py b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/alerts/alert_check_titan_server.py
new file mode 100755
index 0000000..8342c51
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/alerts/alert_check_titan_server.py
@@ -0,0 +1,65 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management.core.resources import Execute
+from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
+from resource_management.libraries.functions import format
+
+RESULT_CODE_OK = 'OK'
+RESULT_CODE_CRITICAL = 'CRITICAL'
+RESULT_CODE_UNKNOWN = 'UNKNOWN'
+STACK_ROOT = '{{cluster-env/stack_root}}'
+TITAN_RUN_DIR = 'titan.run.dir'
+TITAN_USER = 'titan.user'
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def execute(configurations={}, parameters={}, host_name=None):
+  """
+  Returns a tuple containing the result code and a pre-formatted result label
+
+  Keyword arguments:
+  configurations (dictionary): a mapping of configuration key to value
+  parameters (dictionary): a mapping of script parameter key to value
+  host_name (string): the name of this host where the alert is running
+  """
+  titan_bin_dir = configurations[STACK_ROOT] + format("/current/titan-server/bin")
+
+  gremlin_server_script_path = titan_bin_dir + format("/gremlin-server-script.sh")
+  
+  titan_pid_file = parameters[TITAN_RUN_DIR] + format("/titan.pid")
+  titan_user = parameters[TITAN_USER]
+  (code, msg) = get_check_result(gremlin_server_script_path, titan_pid_file, titan_user)
+  return (code, msg)
+
+@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
+def get_tokens():
+  """
+  Returns a tuple of tokens in the format {{site/property}} that will be used
+  to build the dictionary passed into execute
+  """
+  return (STACK_ROOT, TITAN_RUN_DIR)
+
+def get_check_result(gremlin_server_script_path, titan_pid_file, titan_user):
+  cmd = format("{gremlin_server_script_path} status {titan_pid_file}")
+  try:
+    result = Execute(cmd,
+                     user=titan_user
+                     )
+    return (RESULT_CODE_OK, ["titan server is up and running"])
+  except Exception, ex:
+    return (RESULT_CODE_CRITICAL, [ex])
+
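
For context, the alert framework calls execute() with the resolved tokens
from get_tokens() and the parameters declared in the alert definition. A
hedged sketch of such an invocation with hypothetical values (it assumes a
host where the resource_management libraries are available, since the check
shells out via Execute):

# Hypothetical invocation of the alert above; the dictionary keys mirror
# the constants STACK_ROOT, TITAN_RUN_DIR and TITAN_USER.
configurations = {"{{cluster-env/stack_root}}": "/usr/iop"}
parameters = {"titan.run.dir": "/var/run/titan", "titan.user": "titan"}

code, label = execute(configurations=configurations, parameters=parameters)
print(code, label)  # e.g. ('OK', ['titan server is up and running'])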

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/gremlin-server-script.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/gremlin-server-script.sh b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/gremlin-server-script.sh
new file mode 100755
index 0000000..97aa897
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/gremlin-server-script.sh
@@ -0,0 +1,86 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Init script for Gremlin Server so it automatically starts/stops with the machine.
+#
+# To install:
+# 1)  Add a symlink to this file in /etc/init.d/ under the name you'd like to see the service
+#     For example, to name the service "gremlin-server": ln -s /usr/local/packages/dynamodb-titan100-storage-backend-1.0.0-hadoop1/bin/gremlin-server-service.sh /etc/init.d/gremlin-server
+# 2a) If you're running RH: chkconfig --add gremlin-server
+# 2b) If you're running Ubuntu: update-rc.d gremlin-server defaults
+#
+# You have to SET the Gremlin Server installation directory here:
+PID_FILE=$2
+GREMLIN_SERVER_LOG_FILE=$3
+GREMLIN_SERVER_ERR_FILE=$4
+GREMLIN_SERVER_BIN_DIR=$5
+GREMLIN_SERVER_CONF_DIR=$6
+
+
+usage() {
+  echo "Usage: `basename $0`: start|stop|status"
+  exit 1
+}
+
+status() {
+  echo "get program status"
+  local pid
+  if [[ -f "$PID_FILE" && -s "$PID_FILE" ]]; then
+  	#statements
+        pid=$(cat $PID_FILE)
+  	if kill -0 $pid > /dev/null 2>&1; then
+  		# pid exists
+                echo "program is running"
+  		return 0
+  	fi
+  else
+  	echo "program is not running"
+  fi
+  return 1
+}
+
+start() {
+  if ! status ; then
+      echo "start program"
+      /usr/bin/nohup ${GREMLIN_SERVER_BIN_DIR}/gremlin-server.sh ${GREMLIN_SERVER_CONF_DIR}/gremlin-server.yaml 1>$GREMLIN_SERVER_LOG_FILE 2>${GREMLIN_SERVER_ERR_FILE} &
+      echo $! > $PID_FILE
+      sleep 50
+  fi
+}
+
+stop() {
+	local pid
+	if status ; then
+		echo "stop program"
+		pid=`cat $PID_FILE`
+		kill -9 $pid
+                rm -f $PID_FILE
+	fi
+}
+
+case "$1" in
+	start)
+  start
+  ;;
+  stop)
+  stop
+  ;;
+  status)
+  status
+  ;;
+  *)
+  usage
+  ;;
+esac
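
This init script is positional: $1 is the action and $2-$6 supply the pid
file, log file, error file, bin dir and conf dir. A hedged sketch of how an
Ambari command script such as titan_server.py might drive it (paths and
variable names here are illustrative, not the actual service code):

# Sketch: starting Gremlin Server through the script above from Python.
from resource_management.core.resources.system import Execute
from resource_management.libraries.functions.format import format

def start_titan(titan_user, bin_dir, conf_dir, pid_file, log_file, err_file):
    # format() interpolates {names} from the local scope.
    daemon_cmd = format("{bin_dir}/gremlin-server-script.sh start {pid_file} "
                        "{log_file} {err_file} {bin_dir} {conf_dir}")
    Execute(daemon_cmd, user=titan_user)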

http://git-wip-us.apache.org/repos/asf/ambari/blob/69e492f2/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/tinkergraph-empty.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/tinkergraph-empty.properties b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/tinkergraph-empty.properties
new file mode 100755
index 0000000..0e68eeeb
--- /dev/null
+++ b/ambari-server/src/main/resources/common-services/TITAN/1.0.0/package/files/tinkergraph-empty.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+gremlin.graph=org.apache.tinkerpop.gremlin.tinkergraph.structure.TinkerGraph
+gremlin.tinkergraph.vertexIdManager=LONG


[03/12] ambari git commit: AMBARI-21445. Fixes the following bugs: (1) make Hive Kerberos keytab files group non-readable; (2) make HiveServer2 authentication via LDAP work correctly; (3) remove leading white spaces from the hive-env and hive-interactive-

Posted by jo...@apache.org.
AMBARI-21445. Fixes the following bugs: (1) make Hive Kerberos keytab files group non-readable; (2) make HiveServer2 authentication via LDAP work correctly; (3) remove leading white spaces from the hive-env and hive-interactive-env templates.


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/83761d42
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/83761d42
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/83761d42

Branch: refs/heads/branch-feature-AMBARI-21348
Commit: 83761d425f437ef1b74a5669c1aa3cad1c074a26
Parents: c2b2210
Author: Swapan Shridhar <ss...@hortonworks.com>
Authored: Tue Jul 11 15:37:08 2017 -0700
Committer: Swapan Shridhar <ss...@hortonworks.com>
Committed: Wed Jul 12 12:02:34 2017 -0700

----------------------------------------------------------------------
 .../0.12.0.2.0/package/scripts/params_linux.py  |   4 +
 .../0.12.0.2.0/package/scripts/service_check.py |   3 +-
 .../services/HIVE/configuration/hive-env.xml    |  78 +++++-----
 .../HIVE/configuration/hive-interactive-env.xml |  62 ++++----
 .../stacks/HDP/2.6/services/HIVE/kerberos.json  | 151 +++++++++++++++++++
 .../stacks/HDP/2.6/services/YARN/kerberos.json  |   2 +-
 6 files changed, 228 insertions(+), 72 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
index 4b595a8..b2bc34a 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params_linux.py
@@ -846,3 +846,7 @@ ranger_hive_metastore_lookup = default('/configurations/ranger-hive-plugin-prope
 
 if security_enabled:
   hive_metastore_principal_with_host = hive_metastore_principal.replace('_HOST', hostname.lower())
+
+# For ldap - hive_check
+hive_ldap_user= config['configurations']['hive-env'].get('alert_ldap_username','')
+hive_ldap_passwd=config['configurations']['hive-env'].get('alert_ldap_password','')
\ No newline at end of file
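
The .get(key, '') form matters here: on clusters where HiveServer2 does not
use LDAP, hive-env carries no alert_ldap_username/alert_ldap_password, and
defaulting to an empty string keeps params evaluation from raising KeyError.
A minimal illustration (the empty dict stands in for an unconfigured
hive-env):

# dict.get with a default instead of indexing avoids KeyError when the
# optional LDAP credentials are absent.
hive_env = {}  # hypothetical: LDAP not configured
hive_ldap_user = hive_env.get('alert_ldap_username', '')    # -> ''
hive_ldap_passwd = hive_env.get('alert_ldap_password', '')  # -> ''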

http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
index a521d6d..db253d3 100644
--- a/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py
@@ -122,7 +122,8 @@ class HiveServiceCheckDefault(HiveServiceCheck):
                                params.hive_server_principal, kinit_cmd, params.smokeuser,
                                transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
                                ssl=params.hive_ssl, ssl_keystore=ssl_keystore,
-                               ssl_password=ssl_password)
+                               ssl_password=ssl_password, ldap_username=params.hive_ldap_user,
+                               ldap_password=params.hive_ldap_passwd)
         Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
         workable_server_available = True
       except:

http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
index a6cf1bc..929c10d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-env.xml
@@ -60,56 +60,56 @@
     <display-name>hive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      export HADOOP_USER_CLASSPATH_FIRST=true  #this prevents old metrics libs from mapreduce lib from bringing in old jar deps overriding HIVE_LIB
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+export HADOOP_USER_CLASSPATH_FIRST=true  # this prevents old metrics libs from the mapreduce lib from bringing in old jar deps that override HIVE_LIB
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseNUMA -XX:+UseParallelGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the JVM started by the hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB). A larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
+export HIVE_HOME=${HIVE_HOME:-{{hive_home_dir}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}
 
-      # Folder containing extra libraries required for hive compilation/execution can be controlled by:
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
-          export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-        elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-          export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-        fi
-      elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
-      fi
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  if [ -f "${HIVE_AUX_JARS_PATH}" ]; then
+    export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+  elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+    export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+  fi
+elif [ -d "/usr/hdp/current/hive-webhcat/share/hcatalog" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      {% if sqla_db_used or lib_dir_available %}
-      export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
-      {% endif %}
+{% if sqla_db_used or lib_dir_available %}
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:{{jdbc_libs_dir}}"
+export JAVA_LIBRARY_PATH="$JAVA_LIBRARY_PATH:{{jdbc_libs_dir}}"
+{% endif %}
     </value>
     <value-attributes>
       <type>content</type>
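
The reindentation in this hunk matters because the <value> body is emitted
verbatim as hive-env.sh, so the XML-level indentation leaked into the
generated script as leading white space on every line. A standalone
illustration of the effect being removed (sketch only, not Ambari's
rendering code):

# Before the fix, every emitted line carried the XML indentation;
# textwrap.dedent shows what stripping the common prefix does.
import textwrap

xml_value = (
    "      export METASTORE_PORT={{hive_metastore_port}}\n"
    "      export HIVE_CONF_DIR=${HIVE_CONF_DIR:-{{hive_config_dir}}}\n")
print(textwrap.dedent(xml_value))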

http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
index ada4859..86720f4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/configuration/hive-interactive-env.xml
@@ -100,47 +100,47 @@
     <display-name>hive-interactive-env template</display-name>
     <description>This is the jinja template for hive-env.sh file</description>
     <value>
-      if [ "$SERVICE" = "cli" ]; then
-      if [ -z "$DEBUG" ]; then
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-      else
-      export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-      fi
-      fi
+if [ "$SERVICE" = "cli" ]; then
+  if [ -z "$DEBUG" ]; then
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+  else
+    export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+  fi
+fi
 
-      # The heap size of the jvm stared by hive shell script can be controlled via:
+# The heap size of the JVM started by the hive shell script can be controlled via:
 
-      if [ "$SERVICE" = "metastore" ]; then
-      export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
-      else
-      export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
-      fi
+if [ "$SERVICE" = "metastore" ]; then
+  export HADOOP_HEAPSIZE={{hive_metastore_heapsize}} # Setting for HiveMetastore
+else
+  export HADOOP_HEAPSIZE={{hive_interactive_heapsize}} # Setting for HiveServer2 and Client
+fi
 
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
-      export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS  -Xmx${HADOOP_HEAPSIZE}m"
+export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS{{heap_dump_opts}}"
 
-      # Larger heap size may be required when running queries over large number of files or partitions.
-      # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-      # appropriate for hive server (hwi etc).
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB). A larger heap size would also be
+# appropriate for hive server (hwi etc).
 
 
-      # Set HADOOP_HOME to point to a specific hadoop install directory
-      HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
 
-      # Hive Configuration Directory can be controlled by:
-      export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{hive_server_interactive_conf_dir}}
 
-      # Add additional hcatalog jars
-      if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-        export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-      else
-        export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
-      fi
+# Add additional hcatalog jars
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH=/usr/hdp/current/hive-server2-hive2/lib/hive-hcatalog-core.jar
+fi
 
-      export METASTORE_PORT={{hive_metastore_port}}
+export METASTORE_PORT={{hive_metastore_port}}
 
-      # Spark assembly contains a conflicting copy of HiveConf from hive-1.2
-      export HIVE_SKIP_SPARK_ASSEMBLY=true
+# Spark assembly contains a conflicting copy of HiveConf from hive-1.2
+export HIVE_SKIP_SPARK_ASSEMBLY=true
 
     </value>
     <value-attributes>

http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
new file mode 100644
index 0000000..b6e57e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/HIVE/kerberos.json
@@ -0,0 +1,151 @@
+{
+  "services": [
+    {
+      "name": "HIVE",
+      "identities": [
+        {
+          "name": "/spnego"
+        },
+        {
+          "name": "/smokeuser"
+        }
+      ],
+      "configurations": [
+        {
+          "hive-site": {
+            "hive.metastore.sasl.enabled": "true",
+            "hive.server2.authentication": "KERBEROS"
+          }
+        },
+        {
+          "ranger-hive-audit": {
+            "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule",
+            "xasecure.audit.jaas.Client.loginModuleControlFlag": "required",
+            "xasecure.audit.jaas.Client.option.useKeyTab": "true",
+            "xasecure.audit.jaas.Client.option.storeKey": "false",
+            "xasecure.audit.jaas.Client.option.serviceName": "solr",
+            "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true"
+          }
+        }
+      ],
+      "components": [
+        {
+          "name": "HIVE_METASTORE",
+          "identities": [
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-site/hive.metastore.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.metastore.kerberos.keytab.file"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "hive_server_hive",
+              "principal": {
+                "value": "hive/_HOST@${realm}",
+                "type": "service",
+                "configuration": "hive-site/hive.server2.authentication.kerberos.principal",
+                "local_username": "${hive-env/hive_user}"
+              },
+              "keytab": {
+                "file": "${keytab_dir}/hive.service.keytab",
+                "owner": {
+                  "name": "${hive-env/hive_user}",
+                  "access": "r"
+                },
+                "group": {
+                  "name": "${cluster-env/user_group}",
+                  "access": ""
+                },
+                "configuration": "hive-site/hive.server2.authentication.kerberos.keytab"
+              }
+            },
+            {
+              "name": "atlas_kafka",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.principal"
+              },
+              "keytab": {
+                "configuration": "hive-atlas-application.properties/atlas.jaas.KafkaClient.option.keyTab"
+              }
+            },
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.principal"
+              },
+              "keytab": {
+                "configuration": "hive-site/hive.server2.authentication.spnego.keytab"
+              }
+            },
+            {
+              "name": "ranger_audit",
+              "reference": "/HIVE/HIVE_SERVER/hive_server_hive",
+              "principal": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.principal"
+              },
+              "keytab": {
+                "configuration": "ranger-hive-audit/xasecure.audit.jaas.Client.option.keyTab"
+              }
+            }
+          ]
+        },
+        {
+          "name": "HIVE_SERVER_INTERACTIVE",
+          "identities": [
+            {
+              "name": "/HDFS/NAMENODE/hdfs"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/hive_server_hive"
+            },
+            {
+              "name": "/HIVE/HIVE_SERVER/spnego"
+            },
+            {
+              "name": "/YARN/NODEMANAGER/llap_zk_hive"
+            }
+          ]
+        },
+        {
+          "name": "WEBHCAT_SERVER",
+          "identities": [
+            {
+              "name": "/spnego",
+              "principal": {
+                "configuration": "webhcat-site/templeton.kerberos.principal"
+              },
+              "keytab": {
+                "configuration": "webhcat-site/templeton.kerberos.keytab"
+              }
+            }
+          ],
+          "configurations": [
+            {
+              "core-site": {
+                "hadoop.proxyuser.HTTP.hosts": "${clusterHostInfo/webhcat_server_host|append(core-site/hadoop.proxyuser.HTTP.hosts, \\\\,, true)}"
+              }
+            },
+            {
+              "webhcat-site": {
+                "templeton.kerberos.secret": "secret",
+                "templeton.hive.properties": "hive.metastore.local=false,hive.metastore.uris=${clusterHostInfo/hive_metastore_host|each(thrift://%s:9083, \\\\,, \\s*\\,\\s*)},hive.metastore.sasl.enabled=true,hive.metastore.execute.setugi=true,hive.metastore.warehouse.dir=/apps/hive/warehouse,hive.exec.mode.local.auto=false,hive.metastore.kerberos.principal=hive/_HOST@${realm}"
+              }
+            }
+          ]
+        }
+      ]
+    }
+  ]
+}
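
The recurring owner/group "access" fields in this descriptor are what make
the keytabs group non-readable: owner "r" with group "" corresponds to mode
0400, where a group "r" (the value removed in the YARN change below) would
correspond to 0440. A small sketch of that mapping (illustrative of the
intent; Ambari's kerberos processing performs the actual translation when
it writes the keytab):

# Map the descriptor's access strings onto a POSIX file mode.
def keytab_mode(owner_access, group_access):
    mode = 0
    if "r" in owner_access: mode |= 0o400
    if "w" in owner_access: mode |= 0o200
    if "r" in group_access: mode |= 0o040
    if "w" in group_access: mode |= 0o020
    return mode

print(oct(keytab_mode("r", "")))   # 0o400 -> group non-readable
print(oct(keytab_mode("r", "r")))  # 0o440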

http://git-wip-us.apache.org/repos/asf/ambari/blob/83761d42/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
index b1501b8..60d50eb 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
+++ b/ambari-server/src/main/resources/stacks/HDP/2.6/services/YARN/kerberos.json
@@ -117,7 +117,7 @@
                 },
                 "group": {
                   "name": "${cluster-env/user_group}",
-                  "access": "r"
+                  "access": ""
                 },
                 "configuration": "hive-interactive-site/hive.llap.zk.sm.keytab.file"
               },