Posted to commits@ambari.apache.org by ds...@apache.org on 2014/09/11 21:26:41 UTC
[1/3] AMBARI-7257 Use versioned RPMs for the HDP 2.2 stack and make it
pluggable so the scripts can be reused for HDP 2.* (dsen)
Repository: ambari
Updated Branches:
refs/heads/trunk 8778556f4 -> 7d9feb6af
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
index 6da9d2f..2a5481c 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hcat_client.py
@@ -28,7 +28,10 @@ class TestHcatClient(RMFTestCase):
command = "configure",
config_file="default.json"
)
-
+ self.assertResourceCalled('Directory', '/etc/hive/conf',
+ owner = 'hcat',
+ group = 'hadoop',
+ )
self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
owner = 'hcat',
group = 'hadoop',
@@ -59,7 +62,10 @@ class TestHcatClient(RMFTestCase):
command = "configure",
config_file="secured.json"
)
-
+ self.assertResourceCalled('Directory', '/etc/hive/conf',
+ owner = 'hcat',
+ group = 'hadoop',
+ )
self.assertResourceCalled('Directory', '/etc/hcatalog/conf',
owner = 'hcat',
group = 'hadoop',
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
index 0bef64d..204e384 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_metastore.py
@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
@@ -40,6 +41,8 @@ class TestHiveMetastore(RMFTestCase):
self.assert_configure_default()
self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+ 'HADOOP_HOME' : '/usr'},
user = 'hive',
)
self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
@@ -78,8 +81,11 @@ class TestHiveMetastore(RMFTestCase):
)
self.assert_configure_secured()
+ self.maxDiff = None
self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+ 'HADOOP_HOME' : '/usr'},
user = 'hive',
)
self.assertResourceCalled('Execute', '/usr/jdk64/jdk1.7.0_45/bin/java -cp /usr/lib/ambari-agent/DBConnectionVerification.jar:/usr/share/java/mysql-connector-java.jar org.apache.ambari.server.DBConnectionVerification \'jdbc:mysql://c6402.ambari.apache.org/hive?createDatabaseIfNotExist=true\' hive \'!`"\'"\'"\' 1\' com.mysql.jdbc.Driver',
@@ -196,6 +202,7 @@ class TestHiveMetastore(RMFTestCase):
)
self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
path = ['/bin', '/usr/bin/'],
not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
)
@@ -322,6 +329,7 @@ class TestHiveMetastore(RMFTestCase):
self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
path = ['/bin', '/usr/bin/'],
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
)
self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
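[Note] The recurring `environment` keyword in the hunks above is the visible effect of the versioned-RPM work: the metastore start command no longer assumes hive is on the default PATH. A minimal sketch of the value the assertions expect (plain Python; the versioned-path comment is an assumption by analogy with the params.py changes later in this patch):

    import os

    hive_bin = "/usr/lib/hive/bin"  # assumed to become /usr/hdp/<rpm_version>/hive/bin on versioned installs
    expected_env = {
        'PATH': os.environ['PATH'] + os.pathsep + hive_bin,
        'HADOOP_HOME': '/usr',
    }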
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
index 1c7e47e..bc723ab 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_server.py
@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
@@ -53,6 +54,7 @@ class TestHiveServer(RMFTestCase):
keytab = UnknownConfigurationMock(),
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
+ bin_dir = '/usr/bin',
kinit_path_local = "/usr/bin/kinit"
)
@@ -64,6 +66,7 @@ class TestHiveServer(RMFTestCase):
keytab = UnknownConfigurationMock(),
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
+ bin_dir = '/usr/bin',
kinit_path_local = "/usr/bin/kinit"
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -72,6 +75,7 @@ class TestHiveServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
action = ['create']
)
@@ -80,6 +84,7 @@ class TestHiveServer(RMFTestCase):
owner='tez',
dest_dir='/apps/tez/',
kinnit_if_needed='',
+ hadoop_conf_dir='/etc/hadoop/conf',
hdfs_user='hdfs'
)
@@ -88,11 +93,14 @@ class TestHiveServer(RMFTestCase):
owner='tez',
dest_dir='/apps/tez/lib/',
kinnit_if_needed='',
+ hadoop_conf_dir='/etc/hadoop/conf',
hdfs_user='hdfs'
)
self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_hiveserver2_script /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid /etc/hive/conf.server /var/log/hive',
not_if = 'ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+ 'HADOOP_HOME' : '/usr'},
user = 'hive'
)
@@ -144,6 +152,8 @@ class TestHiveServer(RMFTestCase):
self.assert_configure_secured()
self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_hiveserver2_script /var/log/hive/hive-server2.out /var/log/hive/hive-server2.log /var/run/hive/hive-server.pid /etc/hive/conf.server /var/log/hive',
not_if = 'ls /var/run/hive/hive-server.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive-server.pid` >/dev/null 2>&1',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+ 'HADOOP_HOME' : '/usr'},
user = 'hive'
)
@@ -180,6 +190,7 @@ class TestHiveServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0777,
owner = 'hive',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/user/hive',
@@ -190,6 +201,7 @@ class TestHiveServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0700,
owner = 'hive',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -198,6 +210,7 @@ class TestHiveServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('Directory', '/etc/hive/conf.server',
@@ -295,6 +308,7 @@ class TestHiveServer(RMFTestCase):
self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
path = ['/bin', '/usr/bin/'],
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
)
self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
@@ -331,6 +345,7 @@ class TestHiveServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
mode = 0777,
owner = 'hive',
action = ['create_delayed'],
@@ -342,6 +357,7 @@ class TestHiveServer(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
mode = 0700,
+ bin_dir = '/usr/bin',
owner = 'hive',
action = ['create_delayed'],
)
@@ -350,6 +366,7 @@ class TestHiveServer(RMFTestCase):
keytab = '/etc/security/keytabs/hdfs.headless.keytab',
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
+ bin_dir = '/usr/bin',
kinit_path_local = '/usr/bin/kinit',
action = ['create'],
)
@@ -448,6 +465,7 @@ class TestHiveServer(RMFTestCase):
self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
path = ['/bin', '/usr/bin/'],
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
)
self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
index eefb6b9..4ae9ad2 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py
@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import datetime, sys, socket
@@ -42,6 +43,7 @@ class TestServiceCheck(RMFTestCase):
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
try_sleep = 5,
)
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
@@ -50,6 +52,7 @@ class TestServiceCheck(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
keytab=UnknownConfigurationMock(),
kinit_path_local='/usr/bin/kinit',
+ bin_dir = '/usr/lib/hive/bin',
security_enabled=False
)
self.assertResourceCalled('Execute', ' /tmp/hcatSmoke.sh hcatsmoke cleanup',
@@ -57,6 +60,7 @@ class TestServiceCheck(RMFTestCase):
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
try_sleep = 5,
)
self.assertNoMoreResources()
@@ -78,6 +82,7 @@ class TestServiceCheck(RMFTestCase):
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
try_sleep = 5,
)
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
@@ -87,6 +92,7 @@ class TestServiceCheck(RMFTestCase):
keytab='/etc/security/keytabs/hdfs.headless.keytab',
kinit_path_local='/usr/bin/kinit',
security_enabled=True,
+ bin_dir = '/usr/lib/hive/bin',
principal='hdfs'
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hcatSmoke.sh hcatsmoke cleanup',
@@ -94,6 +100,7 @@ class TestServiceCheck(RMFTestCase):
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
try_sleep = 5,
)
self.assertNoMoreResources()
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
index 1b98e45..5740587 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/OOZIE/test_oozie_server.py
@@ -45,8 +45,8 @@ class TestOozieServer(RMFTestCase):
ignore_failures = True,
user = 'oozie',
)
- self.assertResourceCalled('Execute', ' hadoop dfs -put /usr/lib/oozie/share /user/oozie ; hadoop dfs -chmod -R 755 /user/oozie/share',
- not_if = " hadoop dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'",
+ self.assertResourceCalled('Execute', ' hadoop --config /etc/hadoop/conf dfs -put /usr/lib/oozie/share /user/oozie ; hadoop --config /etc/hadoop/conf dfs -chmod -R 755 /user/oozie/share',
+ not_if = " hadoop --config /etc/hadoop/conf dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'",
user = 'oozie',
)
self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-start.sh',
@@ -91,8 +91,8 @@ class TestOozieServer(RMFTestCase):
ignore_failures = True,
user = 'oozie',
)
- self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/oozie.service.keytab oozie/c6402.ambari.apache.org@EXAMPLE.COM; hadoop dfs -put /usr/lib/oozie/share /user/oozie ; hadoop dfs -chmod -R 755 /user/oozie/share',
- not_if = "/usr/bin/kinit -kt /etc/security/keytabs/oozie.service.keytab oozie/c6402.ambari.apache.org@EXAMPLE.COM; hadoop dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'",
+ self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/oozie.service.keytab oozie/c6402.ambari.apache.org@EXAMPLE.COM; hadoop --config /etc/hadoop/conf dfs -put /usr/lib/oozie/share /user/oozie ; hadoop --config /etc/hadoop/conf dfs -chmod -R 755 /user/oozie/share',
+ not_if = "/usr/bin/kinit -kt /etc/security/keytabs/oozie.service.keytab oozie/c6402.ambari.apache.org@EXAMPLE.COM; hadoop --config /etc/hadoop/conf dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'",
user = 'oozie',
)
self.assertResourceCalled('Execute', 'cd /var/tmp/oozie && /usr/lib/oozie/bin/oozie-start.sh',
@@ -122,6 +122,7 @@ class TestOozieServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0775,
owner = 'oozie',
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('XmlConfig', 'oozie-site.xml',
@@ -224,6 +225,7 @@ class TestOozieServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0775,
owner = 'oozie',
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('XmlConfig', 'oozie-site.xml',
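[Note] The Oozie hunks pin every hadoop call to an explicit --config dir, which is what lets the same script run against either /etc/hadoop/conf or a versioned conf dir. Composing that command in plain Python (only the paths come from the hunks; the snippet is illustrative):

    hadoop_conf_dir = "/etc/hadoop/conf"  # or the versioned conf dir when rpm_version is set
    put_share_cmd = (
        " hadoop --config {c} dfs -put /usr/lib/oozie/share /user/oozie ; "
        "hadoop --config {c} dfs -chmod -R 755 /user/oozie/share"
    ).format(c=hadoop_conf_dir)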
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
index 1e1ad24..2521636 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/PIG/test_pig_service_check.py
@@ -28,13 +28,14 @@ class TestPigServiceCheck(RMFTestCase):
command = "service_check",
config_file="default.json"
)
- self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ',
+ self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop --config /etc/hadoop/conf dfs -put /etc/passwd passwd ',
try_sleep = 5,
tries = 3,
user = 'ambari-qa',
conf_dir = '/etc/hadoop/conf',
security_enabled = False,
keytab = UnknownConfigurationMock(),
+ bin_dir = '/usr/bin',
kinit_path_local = '/usr/bin/kinit'
)
@@ -44,7 +45,7 @@ class TestPigServiceCheck(RMFTestCase):
)
self.assertResourceCalled('Execute', 'pig /tmp/pigSmoke.sh',
- path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+ path = [':/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
@@ -52,6 +53,7 @@ class TestPigServiceCheck(RMFTestCase):
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e pigsmoke.out',
user = 'ambari-qa',
+ bin_dir = '/usr/bin',
conf_dir = '/etc/hadoop/conf',
)
self.assertNoMoreResources()
@@ -63,13 +65,14 @@ class TestPigServiceCheck(RMFTestCase):
config_file="secured.json"
)
- self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd ',
+ self.assertResourceCalled('ExecuteHadoop', 'dfs -rmr pigsmoke.out passwd; hadoop --config /etc/hadoop/conf dfs -put /etc/passwd passwd ',
try_sleep = 5,
tries = 3,
user = 'ambari-qa',
conf_dir = '/etc/hadoop/conf',
security_enabled = True,
keytab = '/etc/security/keytabs/smokeuser.headless.keytab',
+ bin_dir = '/usr/bin',
kinit_path_local = '/usr/bin/kinit'
)
@@ -79,7 +82,7 @@ class TestPigServiceCheck(RMFTestCase):
)
self.assertResourceCalled('Execute', 'pig /tmp/pigSmoke.sh',
- path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
+ path = [':/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
@@ -87,6 +90,7 @@ class TestPigServiceCheck(RMFTestCase):
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e pigsmoke.out',
user = 'ambari-qa',
+ bin_dir = '/usr/bin',
conf_dir = '/etc/hadoop/conf',
)
self.assertNoMoreResources()
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/WEBHCAT/test_webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/WEBHCAT/test_webhcat_server.py b/ambari-server/src/test/python/stacks/2.0.6/WEBHCAT/test_webhcat_server.py
index 0e96b66..bde2e86 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/WEBHCAT/test_webhcat_server.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/WEBHCAT/test_webhcat_server.py
@@ -107,6 +107,7 @@ class TestWebHCatServer(RMFTestCase):
kinit_path_local = "/usr/bin/kinit",
mode = 0755,
owner = 'hcat',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/user/hcat',
@@ -117,6 +118,7 @@ class TestWebHCatServer(RMFTestCase):
kinit_path_local = "/usr/bin/kinit",
mode = 0755,
owner = 'hcat',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -125,6 +127,7 @@ class TestWebHCatServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('Directory', '/var/run/webhcat',
@@ -160,6 +163,7 @@ class TestWebHCatServer(RMFTestCase):
mode=0755,
dest_dir='/apps/webhcat',
kinnit_if_needed='',
+ hadoop_conf_dir='/etc/hadoop/conf',
hdfs_user='hdfs'
)
self.assertResourceCalled('CopyFromLocal', '/usr/share/HDP-webhcat/pig.tar.gz',
@@ -167,6 +171,7 @@ class TestWebHCatServer(RMFTestCase):
mode=0755,
dest_dir='/apps/webhcat',
kinnit_if_needed='',
+ hadoop_conf_dir='/etc/hadoop/conf',
hdfs_user='hdfs'
)
self.assertResourceCalled('CopyFromLocal', '/usr/share/HDP-webhcat/hive.tar.gz',
@@ -174,6 +179,7 @@ class TestWebHCatServer(RMFTestCase):
mode=0755,
dest_dir='/apps/webhcat',
kinnit_if_needed='',
+ hadoop_conf_dir='/etc/hadoop/conf',
hdfs_user='hdfs'
)
@@ -186,6 +192,7 @@ class TestWebHCatServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0755,
owner = 'hcat',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/user/hcat',
@@ -196,6 +203,7 @@ class TestWebHCatServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0755,
owner = 'hcat',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -204,6 +212,7 @@ class TestWebHCatServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('Directory', '/var/run/webhcat',
@@ -243,6 +252,7 @@ class TestWebHCatServer(RMFTestCase):
mode=0755,
dest_dir='/apps/webhcat',
kinnit_if_needed='/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs;',
+ hadoop_conf_dir='/etc/hadoop/conf',
hdfs_user='hdfs'
)
self.assertResourceCalled('CopyFromLocal', '/usr/share/HDP-webhcat/pig.tar.gz',
@@ -250,6 +260,7 @@ class TestWebHCatServer(RMFTestCase):
mode=0755,
dest_dir='/apps/webhcat',
kinnit_if_needed='/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs;',
+ hadoop_conf_dir='/etc/hadoop/conf',
hdfs_user='hdfs'
)
self.assertResourceCalled('CopyFromLocal', '/usr/share/HDP-webhcat/hive.tar.gz',
@@ -257,5 +268,6 @@ class TestWebHCatServer(RMFTestCase):
mode=0755,
dest_dir='/apps/webhcat',
kinnit_if_needed='/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs;',
+ hadoop_conf_dir='/etc/hadoop/conf',
hdfs_user='hdfs'
)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
index 155e07d..738ffc1 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_historyserver.py
@@ -124,6 +124,7 @@ class TestHistoryServer(RMFTestCase):
group = 'hadoop',
action = ['create_delayed'],
mode = 0777,
+ bin_dir = '/usr/bin'
)
self.assertResourceCalled('HdfsDirectory', '/mapred',
security_enabled = False,
@@ -132,6 +133,7 @@ class TestHistoryServer(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
owner = 'mapred',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mapred/system',
@@ -141,6 +143,7 @@ class TestHistoryServer(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
owner = 'hdfs',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mr-history/tmp',
@@ -152,6 +155,7 @@ class TestHistoryServer(RMFTestCase):
mode = 0777,
owner = 'mapred',
group = 'hadoop',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mr-history/done',
@@ -163,6 +167,7 @@ class TestHistoryServer(RMFTestCase):
mode = 01777,
owner = 'mapred',
group = 'hadoop',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -171,6 +176,7 @@ class TestHistoryServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
@@ -314,6 +320,7 @@ class TestHistoryServer(RMFTestCase):
owner = 'yarn',
group = 'hadoop',
action = ['create_delayed'],
+ bin_dir = '/usr/bin',
mode = 0777,
)
self.assertResourceCalled('HdfsDirectory', '/mapred',
@@ -323,6 +330,7 @@ class TestHistoryServer(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
owner = 'mapred',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mapred/system',
@@ -332,6 +340,7 @@ class TestHistoryServer(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
owner = 'hdfs',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mr-history/tmp',
@@ -343,6 +352,7 @@ class TestHistoryServer(RMFTestCase):
mode = 0777,
owner = 'mapred',
group = 'hadoop',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mr-history/done',
@@ -354,6 +364,7 @@ class TestHistoryServer(RMFTestCase):
mode = 01777,
owner = 'mapred',
group = 'hadoop',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -362,6 +373,7 @@ class TestHistoryServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
index 4723b0f..5f15d91 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_nodemanager.py
@@ -120,6 +120,7 @@ class TestNodeManager(RMFTestCase):
owner = 'yarn',
group = 'hadoop',
action = ['create_delayed'],
+ bin_dir = '/usr/bin',
mode = 0777,
)
self.assertResourceCalled('HdfsDirectory', '/mapred',
@@ -129,6 +130,7 @@ class TestNodeManager(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
owner = 'mapred',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mapred/system',
@@ -138,6 +140,7 @@ class TestNodeManager(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
owner = 'hdfs',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mr-history/tmp',
@@ -149,6 +152,7 @@ class TestNodeManager(RMFTestCase):
mode = 0777,
owner = 'mapred',
group = 'hadoop',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mr-history/done',
@@ -160,6 +164,7 @@ class TestNodeManager(RMFTestCase):
mode = 01777,
owner = 'mapred',
group = 'hadoop',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -168,6 +173,7 @@ class TestNodeManager(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
@@ -311,6 +317,7 @@ class TestNodeManager(RMFTestCase):
owner = 'yarn',
group = 'hadoop',
action = ['create_delayed'],
+ bin_dir = '/usr/bin',
mode = 0777,
)
self.assertResourceCalled('HdfsDirectory', '/mapred',
@@ -320,6 +327,7 @@ class TestNodeManager(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
owner = 'mapred',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mapred/system',
@@ -329,6 +337,7 @@ class TestNodeManager(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
owner = 'hdfs',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/mr-history/tmp',
@@ -338,6 +347,7 @@ class TestNodeManager(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
mode = 0777,
+ bin_dir = '/usr/bin',
owner = 'mapred',
group = 'hadoop',
action = ['create_delayed'],
@@ -349,6 +359,7 @@ class TestNodeManager(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
mode = 01777,
+ bin_dir = '/usr/bin',
owner = 'mapred',
group = 'hadoop',
action = ['create_delayed'],
@@ -357,6 +368,7 @@ class TestNodeManager(RMFTestCase):
security_enabled = True,
keytab = '/etc/security/keytabs/hdfs.headless.keytab',
conf_dir = '/etc/hadoop/conf',
+ bin_dir = '/usr/bin',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
action = ['create'],
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
index 7c4c01a..65ea0a5 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_service_check.py
@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
@@ -41,8 +42,9 @@ class TestServiceCheck(RMFTestCase):
user = 'ambari-qa',
try_sleep = 5,
)
- self.assertResourceCalled('Execute', '/usr/bin/yarn node -list',
- user = 'ambari-qa',
+ self.assertResourceCalled('Execute', 'yarn --config /etc/hadoop/conf node -list',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/bin"},
+ user = 'ambari-qa',
)
self.assertNoMoreResources()
@@ -63,7 +65,8 @@ class TestServiceCheck(RMFTestCase):
user = 'ambari-qa',
try_sleep = 5,
)
- self.assertResourceCalled('Execute', '/usr/bin/yarn node -list',
+ self.assertResourceCalled('Execute', 'yarn --config /etc/hadoop/conf node -list',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/bin"},
user = 'ambari-qa',
)
self.assertNoMoreResources()
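[Note] Here the assertion switches from a hard-coded /usr/bin/yarn to the bare executable plus --config, with PATH extended so the right binary wins. A sketch of running the check that way (the subprocess usage is illustrative, not part of the patch):

    import os
    import subprocess

    bin_dir = "/usr/bin"  # a versioned hadoop bin dir would be appended instead on HDP 2.2
    env = dict(os.environ)
    env['PATH'] = env['PATH'] + os.pathsep + bin_dir
    subprocess.call("yarn --config /etc/hadoop/conf node -list".split(), env=env)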
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
index 93ea2d1..2282dcc 100644
--- a/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
+++ b/ambari-server/src/test/python/stacks/2.1/FALCON/test_falcon_server.py
@@ -94,6 +94,7 @@ class TestFalconServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0777,
owner = 'falcon',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -102,6 +103,7 @@ class TestFalconServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
action = ['create'],
)
self.assertResourceCalled('Directory', '/hadoop/falcon',
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
index 77909a7..47423ff 100644
--- a/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
+++ b/ambari-server/src/test/python/stacks/2.1/HIVE/test_hive_metastore.py
@@ -17,6 +17,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
+import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
@@ -40,6 +41,8 @@ class TestHiveMetastore(RMFTestCase):
self.assert_configure_default()
self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+ 'HADOOP_HOME' : '/usr'},
user = 'hive'
)
@@ -82,6 +85,8 @@ class TestHiveMetastore(RMFTestCase):
self.assert_configure_secured()
self.assertResourceCalled('Execute', 'env HADOOP_HOME=/usr JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/start_metastore_script /var/log/hive/hive.out /var/log/hive/hive.log /var/run/hive/hive.pid /etc/hive/conf.server /var/log/hive',
not_if = 'ls /var/run/hive/hive.pid >/dev/null 2>&1 && ps `cat /var/run/hive/hive.pid` >/dev/null 2>&1',
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin",
+ 'HADOOP_HOME' : '/usr'},
user = 'hive'
)
@@ -175,6 +180,7 @@ class TestHiveMetastore(RMFTestCase):
self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
path = ['/bin', '/usr/bin/'],
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
)
self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
@@ -279,6 +285,7 @@ class TestHiveMetastore(RMFTestCase):
self.assertResourceCalled('Execute', 'hive mkdir -p /tmp/AMBARI-artifacts/ ; cp /usr/share/java/mysql-connector-java.jar /usr/lib/hive/lib//mysql-connector-java.jar',
creates = '/usr/lib/hive/lib//mysql-connector-java.jar',
path = ['/bin', '/usr/bin/'],
+ environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
not_if = 'test -f /usr/lib/hive/lib//mysql-connector-java.jar',
)
self.assertResourceCalled('Execute', '/bin/sh -c \'cd /usr/lib/ambari-agent/ && curl -kf -x "" --retry 5 http://c6401.ambari.apache.org:8080/resources/DBConnectionVerification.jar -o DBConnectionVerification.jar\'',
[2/3] AMBARI-7257 Use versioned RPMs for the HDP 2.2 stack and make it
pluggable so the scripts can be reused for HDP 2.* (dsen)
Posted by ds...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
index 6aff622..83e40c6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn.py
@@ -78,7 +78,7 @@ def yarn(name = None):
)
XmlConfig("core-site.xml",
- conf_dir=params.config_dir,
+ conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['core-site'],
configuration_attributes=params.config['configuration_attributes']['core-site'],
owner=params.hdfs_user,
@@ -87,7 +87,7 @@ def yarn(name = None):
)
XmlConfig("mapred-site.xml",
- conf_dir=params.config_dir,
+ conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['mapred-site'],
configuration_attributes=params.config['configuration_attributes']['mapred-site'],
owner=params.yarn_user,
@@ -96,7 +96,7 @@ def yarn(name = None):
)
XmlConfig("yarn-site.xml",
- conf_dir=params.config_dir,
+ conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['yarn-site'],
configuration_attributes=params.config['configuration_attributes']['yarn-site'],
owner=params.yarn_user,
@@ -105,7 +105,7 @@ def yarn(name = None):
)
XmlConfig("capacity-scheduler.xml",
- conf_dir=params.config_dir,
+ conf_dir=params.hadoop_conf_dir,
configurations=params.config['configurations']['capacity-scheduler'],
configuration_attributes=params.config['configuration_attributes']['capacity-scheduler'],
owner=params.yarn_user,
@@ -140,7 +140,7 @@ def yarn(name = None):
content=Template('mapreduce.conf.j2')
)
- File(format("{config_dir}/yarn-env.sh"),
+ File(format("{hadoop_conf_dir}/yarn-env.sh"),
owner=params.yarn_user,
group=params.user_group,
mode=0755,
@@ -154,7 +154,7 @@ def yarn(name = None):
mode=06050
)
- File(format("{config_dir}/container-executor.cfg"),
+ File(format("{hadoop_conf_dir}/container-executor.cfg"),
group=params.user_group,
mode=0644,
content=Template('container-executor.cfg.j2')
@@ -168,7 +168,7 @@ def yarn(name = None):
tc_mode = None
tc_owner = params.hdfs_user
- File(format("{config_dir}/mapred-env.sh"),
+ File(format("{hadoop_conf_dir}/mapred-env.sh"),
owner=tc_owner,
content=InlineTemplate(params.mapred_env_sh_template)
)
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
index 2dc3792..6016b99 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/package/scripts/params.py
@@ -26,15 +26,24 @@ import status_params
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
-config_dir = "/etc/zookeeper/conf"
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ config_dir = format('/usr/hdp/{rpm_version}/etc/zookeeper/conf')
+ zk_bin = format('/usr/hdp/{rpm_version}/zookeeper/bin')
+ smoke_script = format('/usr/hdp/{rpm_version}/zookeeper/bin/zkCli.sh')
+else:
+ config_dir = "/etc/zookeeper/conf"
+ zk_bin = '/usr/lib/zookeeper/bin'
+ smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
+
zk_user = config['configurations']['zookeeper-env']['zk_user']
hostname = config['hostname']
-zk_bin = '/usr/lib/zookeeper/bin'
user_group = config['configurations']['cluster-env']['user_group']
zk_env_sh_template = config['configurations']['zookeeper-env']['content']
-smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-
zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
zk_data_dir = config['configurations']['zookeeper-env']['zk_data_dir']
zk_pid_dir = status_params.zk_pid_dir
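[Note] This params.py hunk is the pattern the whole patch is built on: one rpm_version value from hadoop-env flips every path between the classic layout and /usr/hdp/<version>. The same switch in plain Python, without resource_management's default()/format() helpers:

    def zookeeper_paths(rpm_version=None):
        # rpm_version mirrors /configurations/hadoop-env/rpm_version;
        # None means a classic, unversioned install.
        if rpm_version is not None:
            root = "/usr/hdp/{0}".format(rpm_version)
            return {
                "config_dir":   root + "/etc/zookeeper/conf",
                "zk_bin":       root + "/zookeeper/bin",
                "smoke_script": root + "/zookeeper/bin/zkCli.sh",
            }
        return {
            "config_dir":   "/etc/zookeeper/conf",
            "zk_bin":       "/usr/lib/zookeeper/bin",
            "smoke_script": "/usr/lib/zookeeper/bin/zkCli.sh",
        }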
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
index 7a61c8a..79bdef3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py
@@ -23,6 +23,17 @@ from status_params import *
config = Script.get_config()
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ hadoop_bin_dir = "/usr/bin"
+
oozie_user = config['configurations']['oozie-env']['oozie_user']
falcon_user = config['configurations']['falcon-env']['falcon_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
@@ -53,7 +64,6 @@ flacon_apps_dir = '/apps/falcon'
#for create_hdfs_directory
security_enabled = config['configurations']['cluster-env']['security_enabled']
hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -67,5 +77,6 @@ HdfsDirectory = functools.partial(
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local
+ kinit_path_local = kinit_path_local,
+ bin_dir = hadoop_bin_dir
)
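[Note] Threading hadoop_bin_dir into the pre-bound HdfsDirectory partial is why every test assertion earlier in this patch gains a bin_dir keyword. A self-contained sketch of the pattern (the HdfsDirectory stand-in is hypothetical; the real one is a resource_management resource):

    import functools

    def HdfsDirectory(dir_name, **kwargs):  # hypothetical stand-in for the real resource
        print(dir_name, sorted(kwargs))

    HdfsDirectory = functools.partial(
        HdfsDirectory,
        conf_dir="/etc/hadoop/conf",
        hdfs_user="hdfs",
        kinit_path_local="/usr/bin/kinit",
        bin_dir="/usr/bin",  # the keyword this patch adds
    )
    HdfsDirectory("/apps/falcon", owner="falcon", mode=0o777, action=["create_delayed"])
    HdfsDirectory(None, action=["create"])  # flush the delayed creates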
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
index 19668c7..7115de4 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/package/scripts/params.py
@@ -37,8 +37,7 @@ nimbus_host = config['configurations']['storm-site']['nimbus.host']
rest_api_port = "8745"
rest_api_admin_port = "8746"
rest_api_conf_file = format("{conf_dir}/config.yaml")
-rest_lib_dir = "/usr/lib/storm/contrib/storm-rest"
-java_home = config['hostLevelParams']['java_home']
+rest_lib_dir = default("/configurations/storm-env/rest_lib_dir","/usr/lib/storm/contrib/storm-rest")
storm_env_sh_template = config['configurations']['storm-env']['content']
if 'ganglia_server_host' in config['clusterHostInfo'] and \
@@ -48,7 +47,7 @@ if 'ganglia_server_host' in config['clusterHostInfo'] and \
ganglia_report_interval = 60
else:
ganglia_installed = False
-
+
security_enabled = config['configurations']['cluster-env']['security_enabled']
if security_enabled:
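[Note] Making rest_lib_dir overridable through storm-env is a small instance of the same pluggability theme. default() is resource_management's lookup-with-fallback over the command JSON; a rough, hypothetical re-implementation of its behaviour:

    def default(key_path, fallback, config=None):
        node = config or {}
        for part in key_path.strip("/").split("/"):
            if not isinstance(node, dict) or part not in node:
                return fallback
            node = node[part]
        return node

    cfg = {"configurations": {"storm-env": {}}}  # no override present
    rest_lib_dir = default("/configurations/storm-env/rest_lib_dir",
                           "/usr/lib/storm/contrib/storm-rest", cfg)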
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml
new file mode 100644
index 0000000..0be6cb6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/metainfo.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <versions>
+ <active>true</active>
+ </versions>
+ <extends>2.1</extends>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml
new file mode 100644
index 0000000..c99f92a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<reposinfo>
+ <os type="redhat6">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.9.9.9-98</baseurl>
+ <repoid>HDP-2.9.9.9-98</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0</baseurl>
+ <repoid>HDP-2.2.0.0</repoid>
+ <reponame>HDP-2.2</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6</baseurl>
+ <repoid>HDP-UTILS-1.1.0.17</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+ <os type="redhat5">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/BUILDS/2.9.9.9-98</baseurl>
+ <repoid>HDP-2.9.9.9-98</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0</baseurl>
+ <repoid>HDP-2.2.0.0</repoid>
+ <reponame>HDP-2.2</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos5</baseurl>
+ <repoid>HDP-UTILS-1.1.0.17</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+ <os type="suse11">
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/BUILDS/2.9.9.9-98</baseurl>
+ <repoid>HDP-2.9.9.9-98</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/updates/2.2.0.0</baseurl>
+ <repoid>HDP-2.2.0.0</repoid>
+ <reponame>HDP-2.2</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/suse11</baseurl>
+ <repoid>HDP-UTILS-1.1.0.17</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+ <os type="debian12">
+ <repo>
+ <baseurl>REPLACE_WITH_UBUNTU12_URL</baseurl>
+ <repoid>HDP-2.1</repoid>
+ <reponame>HDP</reponame>
+ </repo>
+ <repo>
+ <baseurl>http://dev.hortonworks.com.s3.amazonaws.com/HDP-UTILS-1.1.0.19/repos/ubuntu12</baseurl>
+ <repoid>HDP-UTILS-1.1.0.19</repoid>
+ <reponame>HDP-UTILS</reponame>
+ </repo>
+ </os>
+</reposinfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
new file mode 100644
index 0000000..a6f3e07
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/role_command_order.json
@@ -0,0 +1,88 @@
+{
+ "_comment" : "Record format:",
+ "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+ "general_deps" : {
+ "_comment" : "dependencies for all cases",
+ "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
+ "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
+ "NIMBUS-START" : ["ZOOKEEPER_SERVER-START", "NODEMANAGER-START", "RESOURCEMANAGER-START"],
+ "SUPERVISOR-START" : ["NIMBUS-START"],
+ "STORM_UI_SERVER-START" : ["NIMBUS-START"],
+ "DRPC_SERVER-START" : ["NIMBUS-START"],
+ "STORM_REST_API-START" : ["NIMBUS-START", "STORM_UI_SERVER-START", "SUPERVISOR-START", "DRPC_SERVER-START"],
+ "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+ "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+ "OOZIE_SERVER-START": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+ "WEBHCAT_SERVER-START": ["NODEMANAGER-START", "HIVE_SERVER-START"],
+ "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
+ "HIVE_SERVER-START": ["NODEMANAGER-START", "MYSQL_SERVER-START"],
+ "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+ "FLUME_HANDLER-START": ["OOZIE_SERVER-START"],
+ "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START", "OOZIE_SERVER-START"],
+ "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
+ "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
+ "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
+ "NODEMANAGER-START", "RESOURCEMANAGER-START", "ZOOKEEPER_SERVER-START",
+ "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
+ "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
+ "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+ "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
+ "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+ "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+ "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+ "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+ "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+ "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+ "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+ "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+ "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START", "STORM_UI_SERVER-START",
+ "DRPC_SERVER-START"],
+ "FLUME_SERVICE_CHECK-SERVICE_CHECK": ["FLUME_HANDLER-START"],
+ "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
+ "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+ "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+ "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP"]
+ },
+ "_comment" : "GLUSTERFS-specific dependencies",
+ "optional_glusterfs": {
+ "HBASE_MASTER-START": ["PEERSTATUS-START"],
+ "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"]
+ },
+ "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+ "optional_no_glusterfs": {
+ "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+ "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+ "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+ "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+ "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+ "APP_TIMELINE_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
+ "FALCON_SERVER-START": ["NAMENODE-START", "DATANODE-START"],
+ "FALCON_SERVICE_CHECK-SERVICE_CHECK": ["FALCON_SERVER-START"],
+ "HIVE_SERVER-START": ["DATANODE-START"],
+ "WEBHCAT_SERVER-START": ["DATANODE-START"],
+ "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
+ "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
+ "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+ "SECONDARY_NAMENODE-START"],
+ "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+ "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+ "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+ "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+ "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+ "NAMENODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+ "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"],
+ "DATANODE-STOP": ["RESOURCEMANAGER-STOP", "NODEMANAGER-STOP",
+ "HISTORYSERVER-STOP", "HBASE_MASTER-STOP", "FALCON_SERVER-STOP"]
+ },
+ "_comment" : "Dependencies that are used in HA NameNode cluster",
+ "namenode_optional_ha": {
+ "NAMENODE-START": ["ZKFC-START", "JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+ "ZKFC-START": ["ZOOKEEPER_SERVER-START"],
+ "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"]
+ },
+ "_comment" : "Dependencies that are used in ResourceManager HA cluster",
+ "resourcemanager_optional_ha" : {
+ "RESOURCEMANAGER-START": ["ZOOKEEPER_SERVER-START"]
+ }
+}
+
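[Note] The file reads as a map from a blocked role-command to the role-commands that must finish first. A tiny sketch of checking one record (illustrative only, not Ambari's actual scheduler):

    import json

    deps = json.loads('{"SUPERVISOR-START": ["NIMBUS-START"],'
                      ' "NIMBUS-START": ["ZOOKEEPER_SERVER-START"]}')

    def can_run(role_command, finished):
        # runnable only once every listed blocker has completed
        return all(b in finished for b in deps.get(role_command, []))

    print(can_run("SUPERVISOR-START", {"NIMBUS-START"}))  # True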
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml
new file mode 100644
index 0000000..4a46139
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>FALCON</name>
+ <displayName>Falcon</displayName>
+ <comment>Data management and processing platform</comment>
+ <version>0.6.0.2.2.0.0</version>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml
new file mode 100644
index 0000000..6b702c8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>FLUME</name>
+ <displayName>Flume</displayName>
+      <comment>A distributed service for collecting, aggregating, and moving large amounts of streaming data into HDFS</comment>
+ <version>1.5.0.1.2.9.9.9</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>flume_2_9_9_9_98</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..52cd10d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml
@@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HBASE</name>
+ <displayName>HBase</displayName>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+ synchronization
+ </comment>
+ <version>0.98.4.2.9.9.9</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hbase_2_9_9_9_98</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
new file mode 100644
index 0000000..3213506
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>rpm_version</name>
+ <value>2.9.9.9-98</value>
+ <description>Hadoop RPM version</description>
+ </property>
+</configuration>
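
The rpm_version property above is what makes the HDP 2.2 scripts reusable for HDP 2.*:
when it is set, paths and package names are derived from it instead of being hard-coded.
A minimal sketch of such a derivation, assuming the /usr/hdp/<version> layout used
throughout this patch (the helper name and returned keys are illustrative, not the
actual params.py code):

    def versioned_layout(rpm_version):
        # "2.9.9.9-98" -> root "/usr/hdp/2.9.9.9-98", package suffix "2_9_9_9_98"
        root = "/usr/hdp/" + rpm_version
        suffix = rpm_version.replace(".", "_").replace("-", "_")
        return {
            "hadoop_bin_dir": root + "/hadoop/bin",
            "hadoop_conf_dir": root + "/etc/hadoop/conf",
            "hadoop_package": "hadoop_" + suffix,
        }

    print(versioned_layout("2.9.9.9-98")["hadoop_package"])  # hadoop_2_9_9_9_98

This matches the versioned package names (hadoop_2_9_9_9_98, hbase_2_9_9_9_98, ...) and
the /usr/hdp/2.9.9.9-98 paths that appear in the metainfo and *-site files below.
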
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..4f46cb7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true">
+
+ <property>
+ <name>dfs.hosts.exclude</name>
+ <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf/dfs.exclude</value>
+ <description>Names a file that contains a list of hosts that are
+ not permitted to connect to the namenode. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.</description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..b520a34
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HDFS</name>
+ <displayName>HDFS</displayName>
+ <comment>Apache Hadoop Distributed File System</comment>
+ <version>2.6.0.2.9.9.9</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hadoop_2_9_9_9_98</name>
+ </package>
+ <package>
+ <name>hadoop-lzo</name>
+ </package>
+ </packages>
+ </osSpecific>
+
+ <osSpecific>
+ <osFamily>redhat5,redhat6,suse11</osFamily>
+ <packages>
+ <package>
+ <name>snappy</name>
+ </package>
+ <package>
+ <name>snappy-devel</name>
+ </package>
+ <package>
+ <name>lzo</name>
+ </package>
+ <package>
+ <name>hadoop-lzo-native</name>
+ </package>
+ <package>
+ <name>hadoop_2_9_9_9_98-libhdfs</name>
+ </package>
+ <package>
+ <name>ambari-log4j</name>
+ </package>
+ </packages>
+ </osSpecific>
+
+ </osSpecifics>
+
+ </service>
+ </services>
+</metainfo>
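
Each metainfo.xml selects packages per OS family: the "any" block applies everywhere,
while family-specific blocks (redhat5,redhat6,suse11 above) add extras. A minimal
sketch of that selection logic, assuming only the XML layout shown here (not Ambari's
actual metainfo parser):

    import xml.etree.ElementTree as ET

    def packages_for(metainfo_xml, os_family):
        """Collect package names whose osFamily is 'any' or matches os_family."""
        root = ET.fromstring(metainfo_xml)
        names = []
        for os_specific in root.iter("osSpecific"):
            families = os_specific.findtext("osFamily", "").split(",")
            if "any" in families or os_family in families:
                names.extend(p.findtext("name") for p in os_specific.iter("package"))
        return names

On redhat6 this would yield hadoop_2_9_9_9_98 and hadoop-lzo from the "any" block, plus
snappy, snappy-devel, lzo, hadoop-lzo-native, hadoop_2_9_9_9_98-libhdfs and ambari-log4j.
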
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..28567a7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+ <version>0.14.0.2.9.9.9</version>
+ </service>
+
+ <service>
+ <name>HCATALOG</name>
+      <comment>Table and storage management layer for Hadoop, built on the Hive metastore</comment>
+ <version>0.14.0.2.9.9.9</version>
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hive_2_9_9_9_98-hcatalog</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+ </service>
+
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 0000000..d39f542
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<configuration supports_final="true">
+
+ <property>
+ <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+ <value>*=/usr/hdp/2.9.9.9-98/etc/hadoop/conf</value>
+ <description>
+      Comma-separated AUTHORITY=HADOOP_CONF_DIR pairs, where AUTHORITY is the HOST:PORT of
+      the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+      used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+      the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+      the Oozie configuration directory; the path can also be absolute (e.g. to point
+      to Hadoop client conf/ directories on the local filesystem).
+ </description>
+ </property>
+
+
+
+
+</configuration>
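
A small illustration of the AUTHORITY=HADOOP_CONF_DIR lookup described above
(a hypothetical helper, not Oozie's actual code):

    def resolve_conf_dir(mapping_value, authority):
        # Parse "AUTHORITY=HADOOP_CONF_DIR" pairs; '*' is the wildcard fallback.
        pairs = dict(item.split("=", 1) for item in mapping_value.split(","))
        return pairs.get(authority, pairs.get("*"))

    print(resolve_conf_dir("*=/usr/hdp/2.9.9.9-98/etc/hadoop/conf",
                           "c6401.ambari.apache.org:8020"))
    # -> /usr/hdp/2.9.9.9-98/etc/hadoop/conf
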
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml
new file mode 100644
index 0000000..5c77061
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>OOZIE</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs. This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
+ </comment>
+ <version>4.1.0.2.2.0.0</version>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml
new file mode 100644
index 0000000..335993f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>PIG</name>
+ <displayName>Pig</displayName>
+ <comment>Scripting platform for analyzing large datasets</comment>
+ <version>0.14.0.2.9.9.9</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>pig_2_9_9_9_98</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml
new file mode 100644
index 0000000..f644d74
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>SQOOP</name>
+ <comment>Tool for transferring bulk data between Apache Hadoop and
+ structured data stores such as relational databases
+ </comment>
+ <version>1.4.5.2.2</version>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml
new file mode 100644
index 0000000..6b2b550
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-env.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+ <property>
+ <name>rest_lib_dir</name>
+ <value>/usr/lib/storm/external/storm-rest</value>
+ <description></description>
+ </property>
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml
new file mode 100644
index 0000000..396af4a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/configuration/storm-site.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration supports_final="true">
+
+
+ <property>
+ <name>nimbus.childopts</name>
+ <value>-Xmx1024m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8649,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Nimbus_JVM</value>
+ <description>This parameter is used by the storm-deploy project to configure the jvm options for the nimbus daemon.</description>
+ </property>
+
+ <property>
+ <name>worker.childopts</name>
+ <value>-Xmx768m -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Worker_%ID%_JVM</value>
+ <description>The jvm opts provided to workers launched by this supervisor. All \"%ID%\" substrings are replaced with an identifier for this worker.</description>
+ </property>
+
+
+
+ <property>
+ <name>ui.childopts</name>
+ <value>-Xmx768m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf</value>
+ <description>Childopts for Storm UI Java process.</description>
+ </property>
+
+ <property>
+ <name>supervisor.childopts</name>
+ <value>-Xmx256m -Djava.security.auth.login.config=/etc/storm/conf/storm_jaas.conf -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=56431 -javaagent:/usr/lib/storm/external/storm-jmxetric/lib/jmxetric-1.0.4.jar=host=localhost,port=8650,wireformat31x=true,mode=multicast,config=/usr/lib/storm/external/storm-jmxetric/conf/jmxetric-conf.xml,process=Supervisor_JVM</value>
+ <description>This parameter is used by the storm-deploy project to configure the jvm options for the supervisor daemon.</description>
+ </property>
+
+
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml
new file mode 100644
index 0000000..c25718d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml
@@ -0,0 +1,29 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>STORM</name>
+ <displayName>Storm</displayName>
+      <comment>Apache Hadoop stream processing framework</comment>
+ <version>0.9.3.2.2.0.0</version>
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml
new file mode 100644
index 0000000..25f579a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>TEZ</name>
+ <displayName>Tez</displayName>
+      <comment>Tez is the next-generation Hadoop query-processing framework built on top of YARN.</comment>
+ <version>0.6.0.2.9.9.9</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>tez_2_9_9_9_98</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml
new file mode 100644
index 0000000..d14be36
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration supports_final="true">
+
+ <property>
+ <name>templeton.hadoop.conf.dir</name>
+ <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf</value>
+ <description>The path to the Hadoop configuration.</description>
+ </property>
+
+ <property>
+ <name>templeton.jar</name>
+ <value>/usr/hdp/2.9.9.9-98/hcatalog/share/webhcat/svr/webhcat.jar</value>
+ <description>The path to the Templeton jar file.</description>
+ </property>
+
+ <property>
+ <name>templeton.libjars</name>
+ <value>/usr/hdp/2.9.9.9-98/zookeeper/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+ </property>
+
+
+ <property>
+ <name>templeton.hadoop</name>
+ <value>/usr/hdp/2.9.9.9-98/hadoop/bin/hadoop</value>
+ <description>The path to the Hadoop executable.</description>
+ </property>
+
+
+ <property>
+ <name>templeton.hcat</name>
+ <value>/usr/hdp/2.9.9.9-98/hive/bin/hcat</value>
+ <description>The path to the hcatalog executable.</description>
+ </property>
+
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml
new file mode 100644
index 0000000..a05f9e7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>WEBHCAT</name>
+      <comment>REST API for HCatalog and for running Apache Hadoop jobs (formerly Templeton)</comment>
+ <version>0.14.0.2.9.9.9</version>
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hive_2_9_9_9_98-webhcat</name>
+ </package>
+ <package>
+ <name>webhcat-tar-hive</name>
+ </package>
+ <package>
+ <name>webhcat-tar-pig</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml
new file mode 100644
index 0000000..a831936
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+ <property>
+ <name>mapreduce.admin.user.env</name>
+ <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/hdp/2.9.9.9-98/hadoop/lib/native/Linux-amd64-64</value>
+ <description>
+ Additional execution environment entries for map and reduce task processes.
+ This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc.).
+ </description>
+ </property>
+
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000..065f57e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration supports_final="true" xmlns:xi="http://www.w3.org/2001/XInclude">
+
+ <property>
+ <name>yarn.resourcemanager.nodes.exclude-path</name>
+ <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf/yarn.exclude</value>
+ <description>
+ Names a file that contains a list of hosts that are
+ not permitted to connect to the resource manager. The full pathname of the
+ file must be specified. If the value is empty, no hosts are
+ excluded.
+ </description>
+ </property>
+
+</configuration>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml
new file mode 100644
index 0000000..7a30894
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml
@@ -0,0 +1,71 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>YARN</name>
+ <displayName>YARN</displayName>
+ <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+ <version>2.6.0.2.9.9.9</version>
+      <components>
+        <component>
+          <name>APP_TIMELINE_SERVER</name>
+          <cardinality>1</cardinality>
+        </component>
+      </components>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hadoop_2_9_9_9_98-yarn</name>
+ </package>
+ <package>
+ <name>hadoop_2_9_9_9_98-mapreduce</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+ </service>
+
+ <service>
+ <name>MAPREDUCE2</name>
+ <displayName>MapReduce2</displayName>
+ <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+ <version>2.6.0.2.9.9.9</version>
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>hadoop_2_9_9_9_98-mapreduce</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+ <configuration-dir>configuration-mapred</configuration-dir>
+
+ </service>
+
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..525faef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.2/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements. See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<metainfo>
+ <schemaVersion>2.0</schemaVersion>
+ <services>
+ <service>
+ <name>ZOOKEEPER</name>
+ <displayName>ZooKeeper</displayName>
+ <comment>Centralized service which provides highly reliable distributed coordination</comment>
+ <version>3.4.5.2.9.9.9</version>
+
+ <osSpecifics>
+ <osSpecific>
+ <osFamily>any</osFamily>
+ <packages>
+ <package>
+ <name>zookeeper_2_9_9_9_98</name>
+ </package>
+ </packages>
+ </osSpecific>
+ </osSpecifics>
+
+ </service>
+ </services>
+</metainfo>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
index 78cfde7..beed46a 100644
--- a/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/1.3.2/HDFS/test_service_check.py
@@ -50,18 +50,29 @@ class TestServiceCheck(RMFTestCase):
tries = 20,
conf_dir = '/etc/hadoop/conf',
try_sleep = 3,
+ bin_dir = '/usr/bin',
user = 'ambari-qa',
)
- self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp ; hadoop fs -chmod 777 /tmp',
+ self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp',
conf_dir = '/etc/hadoop/conf',
+ bin_dir = '/usr/bin',
logoutput = True,
- not_if = 'hadoop fs -test -e /tmp',
+ not_if = 'hadoop --config /etc/hadoop/conf fs -test -e /tmp',
try_sleep = 3,
tries = 5,
user = 'ambari-qa',
)
- self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop fs -put /etc/passwd /tmp/',
+ self.assertResourceCalled('ExecuteHadoop', 'fs -chmod 777 /tmp',
+ conf_dir = '/etc/hadoop/conf',
+ bin_dir = '/usr/bin',
+ logoutput = True,
+ try_sleep = 3,
+ tries = 5,
+ user = 'ambari-qa',
+ )
+ self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop --config /etc/hadoop/conf fs -put /etc/passwd /tmp/',
logoutput = True,
+ bin_dir = '/usr/bin',
tries = 5,
conf_dir = '/etc/hadoop/conf',
try_sleep = 3,
@@ -70,6 +81,7 @@ class TestServiceCheck(RMFTestCase):
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /tmp/',
logoutput = True,
tries = 5,
+ bin_dir = '/usr/bin',
conf_dir = '/etc/hadoop/conf',
try_sleep = 3,
user = 'ambari-qa',
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
index c820120..a2261fb 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py
@@ -250,6 +250,7 @@ class TestHBaseMaster(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
owner = 'hbase',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
@@ -260,6 +261,7 @@ class TestHBaseMaster(RMFTestCase):
kinit_path_local = "/usr/bin/kinit",
mode = 0711,
owner = 'hbase',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -268,6 +270,7 @@ class TestHBaseMaster(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = "/usr/bin/kinit",
+ bin_dir = '/usr/bin',
action = ['create'],
)
@@ -350,6 +353,7 @@ class TestHBaseMaster(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
owner = 'hbase',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
@@ -360,6 +364,7 @@ class TestHBaseMaster(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0711,
owner = 'hbase',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -368,5 +373,6 @@ class TestHBaseMaster(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
action = ['create'],
)
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
index 6a97941..c705fbd 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_regionserver.py
@@ -179,6 +179,7 @@ class TestHbaseRegionServer(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
owner = 'hbase',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
@@ -189,6 +190,7 @@ class TestHbaseRegionServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0711,
owner = 'hbase',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -197,6 +199,7 @@ class TestHbaseRegionServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
action = ['create'],
)
@@ -279,6 +282,7 @@ class TestHbaseRegionServer(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
owner = 'hbase',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/apps/hbase/staging',
@@ -289,6 +293,7 @@ class TestHbaseRegionServer(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0711,
owner = 'hbase',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -297,5 +302,6 @@ class TestHbaseRegionServer(RMFTestCase):
conf_dir = '/etc/hadoop/conf',
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
+ bin_dir = '/usr/bin',
action = ['create'],
)
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
index 7f9bfa4..7dab7fc 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_service_check.py
@@ -39,13 +39,13 @@ class TestServiceCheck(RMFTestCase):
content = Template('hbase-smoke.sh.j2'),
mode = 0755,
)
- self.assertResourceCalled('Execute', ' hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
+ self.assertResourceCalled('Execute', ' /usr/lib/hbase/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
logoutput = True,
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
)
- self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf ',
+ self.assertResourceCalled('Execute', ' /tmp/hbaseSmokeVerify.sh /etc/hbase/conf /usr/lib/hbase/bin/hbase',
logoutput = True,
tries = 3,
user = 'ambari-qa',
@@ -74,16 +74,16 @@ class TestServiceCheck(RMFTestCase):
group = 'hadoop',
mode = 0644,
)
- self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; hbase shell /tmp/hbase_grant_permissions.sh',
+ self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.headless.keytab hbase; /usr/lib/hbase/bin/hbase shell /tmp/hbase_grant_permissions.sh',
user = 'hbase',
)
- self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
+ self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /usr/lib/hbase/bin/hbase --config /etc/hbase/conf shell /tmp/hbase-smoke.sh',
logoutput = True,
tries = 3,
user = 'ambari-qa',
try_sleep = 5,
)
- self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hbaseSmokeVerify.sh /etc/hbase/conf ',
+ self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hbaseSmokeVerify.sh /etc/hbase/conf /usr/lib/hbase/bin/hbase',
logoutput = True,
tries = 3,
user = 'ambari-qa',
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
index 5e38f66..c7d2601 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_namenode.py
@@ -48,7 +48,7 @@ class TestNamenode(RMFTestCase):
content = StaticFile('checkForFormat.sh'),
mode = 0755,
)
- self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+ self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
)
@@ -75,7 +75,7 @@ class TestNamenode(RMFTestCase):
self.assertResourceCalled('Execute', 'ulimit -c unlimited; su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
)
- self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+ self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'",
tries = 40,
only_if = None,
try_sleep = 10,
@@ -88,6 +88,7 @@ class TestNamenode(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0777,
owner = 'hdfs',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
@@ -98,6 +99,7 @@ class TestNamenode(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0770,
owner = 'ambari-qa',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -107,6 +109,7 @@ class TestNamenode(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
action = ['create'],
+ bin_dir = '/usr/bin',
only_if = None,
)
self.assertNoMoreResources()
@@ -149,7 +152,7 @@ class TestNamenode(RMFTestCase):
content = StaticFile('checkForFormat.sh'),
mode = 0755,
)
- self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
+ self.assertResourceCalled('Execute', '/tmp/checkForFormat.sh hdfs /etc/hadoop/conf /usr/bin /var/run/hadoop/hdfs/namenode/formatted/ /var/lib/hdfs/namenode/formatted/ /hadoop/hdfs/namenode',
path = ['/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'],
not_if = 'test -d /var/run/hadoop/hdfs/namenode/formatted/ || test -d /var/lib/hdfs/namenode/formatted/',
)
@@ -179,7 +182,7 @@ class TestNamenode(RMFTestCase):
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
user = 'hdfs',
)
- self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+ self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'",
tries = 40,
only_if = None,
try_sleep = 10,
@@ -192,6 +195,7 @@ class TestNamenode(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0777,
owner = 'hdfs',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
@@ -202,6 +206,7 @@ class TestNamenode(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0770,
owner = 'ambari-qa',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -211,6 +216,7 @@ class TestNamenode(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
action = ['create'],
+ bin_dir = '/usr/bin',
only_if = None,
)
self.assertNoMoreResources()
@@ -260,9 +266,9 @@ class TestNamenode(RMFTestCase):
self.assertResourceCalled('Execute', 'ulimit -c unlimited; su - hdfs -c \'export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && /usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode\'',
not_if = 'ls /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid >/dev/null 2>&1 && ps `cat /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid` >/dev/null 2>&1',
)
- self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+ self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'",
tries = 40,
- only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+ only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'",
try_sleep = 10,
)
self.assertResourceCalled('HdfsDirectory', '/tmp',
@@ -273,6 +279,7 @@ class TestNamenode(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0777,
owner = 'hdfs',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
@@ -283,6 +290,7 @@ class TestNamenode(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0770,
owner = 'ambari-qa',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -292,7 +300,8 @@ class TestNamenode(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
action = ['create'],
- only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+ bin_dir = '/usr/bin',
+ only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'",
)
self.assertNoMoreResources()
@@ -326,9 +335,9 @@ class TestNamenode(RMFTestCase):
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs',
user = 'hdfs',
)
- self.assertResourceCalled('Execute', "su - hdfs -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'",
+ self.assertResourceCalled('Execute', "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hadoop --config /etc/hadoop/conf dfsadmin -safemode get' | grep 'Safe mode is OFF'",
tries = 40,
- only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+ only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'",
try_sleep = 10,
)
self.assertResourceCalled('HdfsDirectory', '/tmp',
@@ -339,6 +348,7 @@ class TestNamenode(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0777,
owner = 'hdfs',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', '/user/ambari-qa',
@@ -349,6 +359,7 @@ class TestNamenode(RMFTestCase):
kinit_path_local = '/usr/bin/kinit',
mode = 0770,
owner = 'ambari-qa',
+ bin_dir = '/usr/bin',
action = ['create_delayed'],
)
self.assertResourceCalled('HdfsDirectory', None,
@@ -358,7 +369,8 @@ class TestNamenode(RMFTestCase):
hdfs_user = 'hdfs',
kinit_path_local = '/usr/bin/kinit',
action = ['create'],
- only_if = "su - hdfs -c 'hdfs haadmin -getServiceState nn1 | grep active > /dev/null'",
+ bin_dir = '/usr/bin',
+ only_if = "su - hdfs -c 'export PATH=$PATH:/usr/bin ; hdfs --config /etc/hadoop/conf haadmin -getServiceState nn1 | grep active > /dev/null'",
)
self.assertNoMoreResources()
@@ -377,6 +389,7 @@ class TestNamenode(RMFTestCase):
self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -refreshNodes',
user = 'hdfs',
conf_dir = '/etc/hadoop/conf',
+ bin_dir = '/usr/bin',
kinit_override = True)
self.assertNoMoreResources()
@@ -394,7 +407,8 @@ class TestNamenode(RMFTestCase):
self.assertResourceCalled('Execute', '', user = 'hdfs')
self.assertResourceCalled('ExecuteHadoop', 'dfsadmin -fs hdfs://c6401.ambari.apache.org:8020 -refreshNodes',
user = 'hdfs',
- conf_dir = '/etc/hadoop/conf',
+ conf_dir = '/etc/hadoop/conf',
+ bin_dir = '/usr/bin',
kinit_override = True)
self.assertNoMoreResources()
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
index 78cfde7..57abab3 100644
--- a/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
+++ b/ambari-server/src/test/python/stacks/2.0.6/HDFS/test_service_check.py
@@ -50,20 +50,31 @@ class TestServiceCheck(RMFTestCase):
tries = 20,
conf_dir = '/etc/hadoop/conf',
try_sleep = 3,
+ bin_dir = '/usr/bin',
user = 'ambari-qa',
)
- self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp ; hadoop fs -chmod 777 /tmp',
+ self.assertResourceCalled('ExecuteHadoop', 'fs -mkdir /tmp',
conf_dir = '/etc/hadoop/conf',
logoutput = True,
- not_if = 'hadoop fs -test -e /tmp',
+ not_if = 'hadoop --config /etc/hadoop/conf fs -test -e /tmp',
try_sleep = 3,
tries = 5,
+ bin_dir = '/usr/bin',
user = 'ambari-qa',
)
- self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop fs -put /etc/passwd /tmp/',
+ self.assertResourceCalled('ExecuteHadoop', 'fs -chmod 777 /tmp',
+ conf_dir = '/etc/hadoop/conf',
+ logoutput = True,
+ try_sleep = 3,
+ tries = 5,
+ bin_dir = '/usr/bin',
+ user = 'ambari-qa',
+ )
+ self.assertResourceCalled('ExecuteHadoop', 'fs -rm /tmp/; hadoop --config /etc/hadoop/conf fs -put /etc/passwd /tmp/',
logoutput = True,
tries = 5,
conf_dir = '/etc/hadoop/conf',
+ bin_dir = '/usr/bin',
try_sleep = 3,
user = 'ambari-qa',
)
@@ -71,6 +82,7 @@ class TestServiceCheck(RMFTestCase):
logoutput = True,
tries = 5,
conf_dir = '/etc/hadoop/conf',
+ bin_dir = '/usr/bin',
try_sleep = 3,
user = 'ambari-qa',
)
[3/3] git commit: AMBARI-7257 Use Versioned RPMs for HDP 2.2 stack
and make it pluggable to be able to reuse the scripts for HDP 2.* (dsen)
Posted by ds...@apache.org.
AMBARI-7257 Use Versioned RPMs for HDP 2.2 stack and make it pluggable to be able to reuse the scripts for HDP 2.* (dsen)
Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/7d9feb6a
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/7d9feb6a
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/7d9feb6a
Branch: refs/heads/trunk
Commit: 7d9feb6afa6cd310fb8b11221ab67f01f048dd38
Parents: 8778556
Author: Dmytro Sen <ds...@hortonworks.com>
Authored: Thu Sep 11 22:26:27 2014 +0300
Committer: Dmytro Sen <ds...@hortonworks.com>
Committed: Thu Sep 11 22:26:27 2014 +0300
----------------------------------------------------------------------
.../libraries/providers/execute_hadoop.py | 9 +-
.../libraries/providers/hdfs_directory.py | 16 +++-
.../libraries/resources/execute_hadoop.py | 1 +
.../libraries/resources/hdfs_directory.py | 1 +
.../2.0.6/hooks/after-INSTALL/scripts/params.py | 20 +++--
.../hooks/before-INSTALL/scripts/params.py | 3 +-
.../hooks/before-START/files/checkForFormat.sh | 3 +
.../2.0.6/hooks/before-START/scripts/params.py | 25 ++++--
.../services/FLUME/package/scripts/flume.py | 2 +-
.../FLUME/package/scripts/flume_check.py | 2 +-
.../services/FLUME/package/scripts/params.py | 14 +++-
.../HBASE/package/files/hbaseSmokeVerify.sh | 3 +-
.../services/HBASE/package/scripts/params.py | 37 ++++++--
.../HBASE/package/scripts/service_check.py | 6 +-
.../HDFS/package/files/checkForFormat.sh | 4 +-
.../HDFS/package/scripts/hdfs_namenode.py | 15 ++--
.../services/HDFS/package/scripts/namenode.py | 2 +-
.../services/HDFS/package/scripts/params.py | 34 +++++---
.../HDFS/package/scripts/service_check.py | 27 ++++--
.../2.0.6/services/HIVE/package/scripts/hcat.py | 6 ++
.../HIVE/package/scripts/hcat_service_check.py | 8 +-
.../2.0.6/services/HIVE/package/scripts/hive.py | 2 +
.../HIVE/package/scripts/hive_service.py | 9 +-
.../HIVE/package/scripts/install_jars.py | 6 +-
.../services/HIVE/package/scripts/params.py | 73 ++++++++++------
.../package/templates/startHiveserver2.sh.j2 | 2 +-
.../services/OOZIE/configuration/oozie-env.xml | 2 +-
.../services/OOZIE/package/files/oozieSmoke2.sh | 8 +-
.../OOZIE/package/scripts/oozie_service.py | 4 +-
.../services/OOZIE/package/scripts/params.py | 24 ++++--
.../services/PIG/package/scripts/params.py | 20 ++++-
.../PIG/package/scripts/service_check.py | 10 ++-
.../services/SQOOP/package/scripts/params.py | 10 ++-
.../WEBHCAT/configuration/webhcat-env.xml | 2 +-
.../services/WEBHCAT/package/scripts/params.py | 41 ++++++---
.../services/WEBHCAT/package/scripts/webhcat.py | 11 ++-
.../services/YARN/package/scripts/params.py | 45 ++++++----
.../YARN/package/scripts/resourcemanager.py | 5 +-
.../services/YARN/package/scripts/service.py | 2 +-
.../YARN/package/scripts/service_check.py | 3 +-
.../2.0.6/services/YARN/package/scripts/yarn.py | 14 ++--
.../ZOOKEEPER/package/scripts/params.py | 17 +++-
.../services/FALCON/package/scripts/params.py | 15 +++-
.../services/STORM/package/scripts/params.py | 5 +-
.../main/resources/stacks/HDP/2.2/metainfo.xml | 23 +++++
.../resources/stacks/HDP/2.2/repos/repoinfo.xml | 82 ++++++++++++++++++
.../stacks/HDP/2.2/role_command_order.json | 88 ++++++++++++++++++++
.../stacks/HDP/2.2/services/FALCON/metainfo.xml | 28 +++++++
.../stacks/HDP/2.2/services/FLUME/metainfo.xml | 40 +++++++++
.../stacks/HDP/2.2/services/HBASE/metainfo.xml | 42 ++++++++++
.../services/HDFS/configuration/hadoop-env.xml | 29 +++++++
.../services/HDFS/configuration/hdfs-site.xml | 34 ++++++++
.../stacks/HDP/2.2/services/HDFS/metainfo.xml | 68 +++++++++++++++
.../stacks/HDP/2.2/services/HIVE/metainfo.xml | 44 ++++++++++
.../services/OOZIE/configuration/oozie-site.xml | 38 +++++++++
.../stacks/HDP/2.2/services/OOZIE/metainfo.xml | 28 +++++++
.../stacks/HDP/2.2/services/PIG/metainfo.xml | 41 +++++++++
.../stacks/HDP/2.2/services/SQOOP/metainfo.xml | 29 +++++++
.../services/STORM/configuration/storm-env.xml | 29 +++++++
.../services/STORM/configuration/storm-site.xml | 54 ++++++++++++
.../stacks/HDP/2.2/services/STORM/metainfo.xml | 29 +++++++
.../stacks/HDP/2.2/services/TEZ/metainfo.xml | 40 +++++++++
.../WEBHCAT/configuration/webhcat-site.xml | 59 +++++++++++++
.../HDP/2.2/services/WEBHCAT/metainfo.xml | 44 ++++++++++
.../YARN/configuration-mapred/mapred-site.xml | 36 ++++++++
.../services/YARN/configuration/yarn-site.xml | 35 ++++++++
.../stacks/HDP/2.2/services/YARN/metainfo.xml | 71 ++++++++++++++++
.../HDP/2.2/services/ZOOKEEPER/metainfo.xml | 40 +++++++++
.../stacks/1.3.2/HDFS/test_service_check.py | 18 +++-
.../stacks/2.0.6/HBASE/test_hbase_master.py | 6 ++
.../2.0.6/HBASE/test_hbase_regionserver.py | 6 ++
.../2.0.6/HBASE/test_hbase_service_check.py | 10 +--
.../python/stacks/2.0.6/HDFS/test_namenode.py | 36 +++++---
.../stacks/2.0.6/HDFS/test_service_check.py | 18 +++-
.../stacks/2.0.6/HIVE/test_hcat_client.py | 10 ++-
.../stacks/2.0.6/HIVE/test_hive_metastore.py | 8 ++
.../stacks/2.0.6/HIVE/test_hive_server.py | 18 ++++
.../2.0.6/HIVE/test_hive_service_check.py | 7 ++
.../stacks/2.0.6/OOZIE/test_oozie_server.py | 10 ++-
.../stacks/2.0.6/PIG/test_pig_service_check.py | 12 ++-
.../stacks/2.0.6/WEBHCAT/test_webhcat_server.py | 12 +++
.../stacks/2.0.6/YARN/test_historyserver.py | 12 +++
.../stacks/2.0.6/YARN/test_nodemanager.py | 12 +++
.../2.0.6/YARN/test_yarn_service_check.py | 9 +-
.../stacks/2.1/FALCON/test_falcon_server.py | 2 +
.../stacks/2.1/HIVE/test_hive_metastore.py | 7 ++
86 files changed, 1636 insertions(+), 196 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-common/src/main/python/resource_management/libraries/providers/execute_hadoop.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/execute_hadoop.py b/ambari-common/src/main/python/resource_management/libraries/providers/execute_hadoop.py
index 8ab71ff..f367e99 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/execute_hadoop.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/execute_hadoop.py
@@ -19,6 +19,7 @@ limitations under the License.
Ambari Agent
"""
+import os
from resource_management import *
@@ -27,6 +28,7 @@ class ExecuteHadoopProvider(Provider):
kinit__path_local = self.resource.kinit_path_local
keytab = self.resource.keytab
conf_dir = self.resource.conf_dir
+ bin_dir = self.resource.bin_dir
command = self.resource.command
principal = self.resource.principal
@@ -39,10 +41,15 @@ class ExecuteHadoopProvider(Provider):
path = ['/bin'],
user = self.resource.user
)
-
+
+ path = os.environ['PATH']
+ if bin_dir is not None:
+ path += os.pathsep + bin_dir
+
Execute (format("hadoop --config {conf_dir} {command}"),
user = self.resource.user,
tries = self.resource.tries,
try_sleep = self.resource.try_sleep,
logoutput = self.resource.logoutput,
+ environment = {'PATH' : path}
)
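Reviewer's note: the PATH handling added to this provider is small enough to restate as a standalone sketch. A minimal version, assuming only the stdlib (build_path is a hypothetical helper name, not part of the patch):

    import os

    def build_path(bin_dir=None):
        # Mirror the provider: start from the agent's inherited PATH and
        # append the stack-specific hadoop bin directory when supplied.
        path = os.environ['PATH']
        if bin_dir is not None:
            path += os.pathsep + bin_dir
        return path

    # build_path('/usr/hdp/2.2.0.0/hadoop/bin') appends a versioned bin dir
    # (version string illustrative); build_path() leaves PATH untouched,
    # preserving the pre-2.2 behavior.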
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
index 6a40b6d..33cc1be 100644
--- a/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
+++ b/ambari-common/src/main/python/resource_management/libraries/providers/hdfs_directory.py
@@ -19,6 +19,7 @@ limitations under the License.
Ambari Agent
"""
+import os
from resource_management import *
directories_list = [] #directories list for mkdir
@@ -68,6 +69,7 @@ class HdfsDirectoryProvider(Provider):
secured = self.resource.security_enabled
keytab_file = self.resource.keytab
kinit_path = self.resource.kinit_path_local
+ bin_dir = self.resource.bin_dir
chmod_commands = []
chown_commands = []
@@ -76,7 +78,7 @@ class HdfsDirectoryProvider(Provider):
mode = chmod_key[0]
recursive = chmod_key[1]
chmod_dirs_str = ' '.join(chmod_dirs)
- chmod_commands.append(format("hadoop fs -chmod {recursive} {mode} {chmod_dirs_str}"))
+ chmod_commands.append(format("hadoop --config {hdp_conf_dir} fs -chmod {recursive} {mode} {chmod_dirs_str}"))
for chown_key, chown_dirs in chown_map.items():
owner = chown_key[0]
@@ -87,7 +89,7 @@ class HdfsDirectoryProvider(Provider):
chown = owner
if group:
chown = format("{owner}:{group}")
- chown_commands.append(format("hadoop fs -chown {recursive} {chown} {chown_dirs_str}"))
+ chown_commands.append(format("hadoop --config {hdp_conf_dir} fs -chown {recursive} {chown} {chown_dirs_str}"))
if secured:
Execute(format("{kinit_path} -kt {keytab_file} {hdfs_principal_name}"),
@@ -97,11 +99,17 @@ class HdfsDirectoryProvider(Provider):
#for hadoop 2 we need to specify -p to create directories recursively
parent_flag = '`rpm -q hadoop | grep -q "hadoop-1" || echo "-p"`'
- Execute(format('hadoop fs -mkdir {parent_flag} {dir_list_str} && {chmod_cmd} && {chown_cmd}',
+ path = os.environ['PATH']
+ if bin_dir is not None:
+ path += os.pathsep + bin_dir
+
+ Execute(format('hadoop --config {hdp_conf_dir} fs -mkdir {parent_flag} {dir_list_str} && {chmod_cmd} && {chown_cmd}',
chmod_cmd=' && '.join(chmod_commands),
chown_cmd=' && '.join(chown_commands)),
user=hdp_hdfs_user,
- not_if=format("su - {hdp_hdfs_user} -c 'hadoop fs -ls {dir_list_str}'")
+ environment = {'PATH' : path},
+ not_if=format("su - {hdp_hdfs_user} -c 'export PATH=$PATH:{bin_dir} ; "
+ "hadoop --config {hdp_conf_dir} fs -ls {dir_list_str}'")
)
directories_list[:] = []
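To make the new guard concrete, here is a minimal sketch of the command pair the provider now builds, with illustrative values standing in for the resource arguments:

    import os

    hdp_conf_dir = '/etc/hadoop/conf'      # illustrative
    bin_dir = '/usr/bin'                   # illustrative
    hdfs_user = 'hdfs'                     # illustrative
    dir_list_str = '/tmp /user/ambari-qa'  # illustrative

    path = os.environ['PATH'] + os.pathsep + bin_dir
    create_cmd = "hadoop --config %s fs -mkdir -p %s" % (hdp_conf_dir, dir_list_str)
    # The guard exports bin_dir inside the su shell, since 'su -' resets PATH.
    guard = ("su - %s -c 'export PATH=$PATH:%s ; hadoop --config %s fs -ls %s'"
             % (hdfs_user, bin_dir, hdp_conf_dir, dir_list_str))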
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-common/src/main/python/resource_management/libraries/resources/execute_hadoop.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/resources/execute_hadoop.py b/ambari-common/src/main/python/resource_management/libraries/resources/execute_hadoop.py
index 94daf5b..149548d 100644
--- a/ambari-common/src/main/python/resource_management/libraries/resources/execute_hadoop.py
+++ b/ambari-common/src/main/python/resource_management/libraries/resources/execute_hadoop.py
@@ -32,6 +32,7 @@ class ExecuteHadoop(Resource):
user = ResourceArgument()
logoutput = BooleanArgument(default=False)
principal = ResourceArgument(default=lambda obj: obj.user)
+ bin_dir = ResourceArgument() # appended to $PATH
conf_dir = ResourceArgument()
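A usage example for the new argument, mirrored from the 2.0.6 service-check expectations elsewhere in this patch (runs inside a resource_management Environment, as all RMF resources do):

    from resource_management import *

    ExecuteHadoop('fs -mkdir /tmp',
                  user='ambari-qa',
                  conf_dir='/etc/hadoop/conf',
                  bin_dir='/usr/bin',  # appended to $PATH before hadoop runs
                  not_if='hadoop --config /etc/hadoop/conf fs -test -e /tmp',
                  tries=5,
                  try_sleep=3,
                  logoutput=True)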
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_directory.py
----------------------------------------------------------------------
diff --git a/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_directory.py b/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_directory.py
index 63d9cc2..7888cd8 100644
--- a/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_directory.py
+++ b/ambari-common/src/main/python/resource_management/libraries/resources/hdfs_directory.py
@@ -38,6 +38,7 @@ class HdfsDirectory(Resource):
keytab = ResourceArgument()
kinit_path_local = ResourceArgument()
hdfs_user = ResourceArgument()
+ bin_dir = ResourceArgument(default="")
#action 'create' immediately creates all pending directory in efficient manner
#action 'create_delayed' add directory to list of pending directories
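The params modules later in this patch thread bin_dir in once via functools.partial, so every HdfsDirectory call picks it up implicitly; condensed from the HDFS/HBASE params.py changes below (the lowercase names are the surrounding params-module variables):

    import functools
    from resource_management import *

    HdfsDirectory = functools.partial(
        HdfsDirectory,
        conf_dir=hadoop_conf_dir,
        hdfs_user=hdfs_user,
        security_enabled=security_enabled,
        keytab=hdfs_user_keytab,
        kinit_path_local=kinit_path_local,
        bin_dir=hadoop_bin_dir,  # the new argument
    )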
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
index d537199..389d6ab 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
@@ -19,17 +19,29 @@ limitations under the License.
from resource_management import *
from resource_management.core.system import System
-import os
config = Script.get_config()
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ hadoop_conf_empty_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf.empty")
+ mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+ hadoop_libexec_dir = format("/usr/hdp/{rpm_version}/hadoop/libexec")
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+ mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+ hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#java params
java_home = config['hostLevelParams']['java_home']
#hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
@@ -56,8 +68,6 @@ ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#users and groups
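Worked through with a concrete value: if hadoop-env supplies rpm_version, the branch above resolves everything under /usr/hdp/<version>/; left unset, default() returns None and the legacy locations apply, so HDP 2.0/2.1 clusters are untouched.

    # Illustrative resolution, assuming rpm_version == '2.2.0.0':
    #   hadoop_conf_dir    -> /usr/hdp/2.2.0.0/etc/hadoop/conf
    #   hadoop_libexec_dir -> /usr/hdp/2.2.0.0/hadoop/libexec
    # With rpm_version unset, the /etc and /usr/lib defaults are kept.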
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
index 01789a7..5700e28 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
@@ -19,7 +19,6 @@ limitations under the License.
from resource_management import *
from resource_management.core.system import System
-import os
import json
import collections
@@ -38,6 +37,8 @@ user_group = config['configurations']['cluster-env']['user_group']
proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
nagios_group = config['configurations']['nagios-env']['nagios_group']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+
#hosts
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
index f92f613..9036ab2 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh
@@ -24,6 +24,8 @@ export hdfs_user=$1
shift
export conf_dir=$1
shift
+export bin_dir=$1
+shift
export mark_dir=$1
shift
export name_dirs=$*
@@ -50,6 +52,6 @@ if [[ ! -d $mark_dir ]] ; then
done
if [[ $EXIT_CODE == 0 ]] ; then
- su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+ su - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | hadoop --config ${conf_dir} ${command}"
else
echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
index fc525a6..8fb2d90 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
@@ -23,6 +23,25 @@ import os
config = Script.get_config()
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+ hadoop_libexec_dir = format("/usr/hdp/{rpm_version}/hadoop/libexec")
+ hadoop_lib_home = format("/usr/hdp/{rpm_version}/hadoop/lib")
+ hadoop_bin = format("/usr/hdp/{rpm_version}/hadoop/sbin")
+ hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+ hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+ hadoop_lib_home = "/usr/lib/hadoop/lib"
+ hadoop_bin = "/usr/lib/hadoop/sbin"
+ hadoop_home = '/usr'
+
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
@@ -72,11 +91,7 @@ if has_ganglia_server:
if has_namenode:
hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_lib_home = "/usr/lib/hadoop/lib"
-hadoop_conf_dir = "/etc/hadoop/conf"
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_home = "/usr"
-hadoop_bin = "/usr/lib/hadoop/sbin"
task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
@@ -127,8 +142,6 @@ ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#log4j.properties
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py
index 6109d3e..1404d27 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py
@@ -63,7 +63,7 @@ def flume(action = None):
_set_desired_state('STARTED')
flume_base = format('su -s /bin/bash {flume_user} -c "export JAVA_HOME={java_home}; '
- '/usr/bin/flume-ng agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
+ '{flume_bin} agent --name {{0}} --conf {{1}} --conf-file {{2}} {{3}}"')
for agent in cmd_target_names():
flume_agent_conf_dir = params.flume_conf_dir + os.sep + agent
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py
index 3036e20..b93b8e8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume_check.py
@@ -31,7 +31,7 @@ class FlumeServiceCheck(Script):
Execute(format("{kinit_path_local} -kt {http_keytab} {principal_replaced}"),
user=params.smoke_user)
- Execute(format('env JAVA_HOME={java_home} /usr/bin/flume-ng version'),
+ Execute(format('env JAVA_HOME={java_home} {flume_bin} version'),
logoutput=True,
tries = 3,
try_sleep = 20)
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
index 128eed4..c1f8804 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
@@ -26,9 +26,19 @@ proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
security_enabled = False
-java_home = config['hostLevelParams']['java_home']
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ flume_conf_dir = format('/usr/hdp/{rpm_version}/etc/flume/conf')
+ flume_bin = format('/usr/hdp/{rpm_version}/flume/bin/flume-ng')
-flume_conf_dir = '/etc/flume/conf'
+else:
+ flume_conf_dir = '/etc/flume/conf'
+ flume_bin = '/usr/bin/flume-ng'
+
+java_home = config['hostLevelParams']['java_home']
flume_log_dir = '/var/log/flume'
flume_run_dir = '/var/run/flume'
flume_user = 'flume'
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
index eedffd3..5c320c0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -21,7 +21,8 @@
#
conf_dir=$1
data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
+hbase_cmd=$3
+echo "scan 'ambarismoketest'" | $hbase_cmd --config $conf_dir shell > /tmp/hbase_chk_verify
cat /tmp/hbase_chk_verify
echo "Looking for $data"
grep -q $data /tmp/hbase_chk_verify
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
index 364649c..d07ebd1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
@@ -26,11 +26,27 @@ import status_params
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
-hbase_conf_dir = "/etc/hbase/conf"
-daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-region_mover = "/usr/lib/hbase/bin/region_mover.rb"
-region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
-hbase_cmd = "/usr/lib/hbase/bin/hbase"
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+ hbase_conf_dir = format('/usr/hdp/{rpm_version}/etc/hbase/conf')
+ daemon_script = format('/usr/hdp/{rpm_version}/hbase/bin/hbase-daemon.sh')
+ region_mover = format('/usr/hdp/{rpm_version}/hbase/bin/region_mover.rb')
+ region_drainer = format('/usr/hdp/{rpm_version}/hbase/bin/draining_servers.rb')
+ hbase_cmd = format('/usr/hdp/{rpm_version}/hbase/bin/hbase')
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ hadoop_bin_dir = "/usr/bin"
+ hbase_conf_dir = "/etc/hbase/conf"
+ daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+ region_mover = "/usr/lib/hbase/bin/region_mover.rb"
+ region_drainer = "/usr/lib/hbase/bin/draining_servers.rb"
+ hbase_cmd = "/usr/lib/hbase/bin/hbase"
+
hbase_excluded_hosts = config['commandParams']['excluded_hosts']
hbase_drain_only = config['commandParams']['mark_draining_only']
hbase_included_hosts = config['commandParams']['included_hosts']
@@ -72,7 +93,7 @@ if 'slave_hosts' in config['clusterHostInfo']:
rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/slave_hosts') #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
else:
rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts')
-
+
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_permissions = "RWXCA"
service_check_data = functions.get_unique_id_and_date()
@@ -105,7 +126,6 @@ hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
hbase_staging_dir = "/apps/hbase/staging"
#for create_hdfs_directory
hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -119,5 +139,6 @@ HdfsDirectory = functools.partial(
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local
+ kinit_path_local = kinit_path_local,
+ bin_dir = hadoop_bin_dir
)
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
index 8fb38f7..15a306b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/service_check.py
@@ -44,7 +44,7 @@ class HbaseServiceCheck(Script):
if params.security_enabled:
hbase_grant_premissions_file = format("{exec_tmp_dir}/hbase_grant_permissions.sh")
- grantprivelegecmd = format("{kinit_cmd} hbase shell {hbase_grant_premissions_file}")
+ grantprivelegecmd = format("{kinit_cmd} {hbase_cmd} shell {hbase_grant_premissions_file}")
File( hbase_grant_premissions_file,
owner = params.hbase_user,
@@ -57,8 +57,8 @@ class HbaseServiceCheck(Script):
user = params.hbase_user,
)
- servicecheckcmd = format("{smokeuser_kinit_cmd} hbase --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
- smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data}")
+ servicecheckcmd = format("{smokeuser_kinit_cmd} {hbase_cmd} --config {hbase_conf_dir} shell {hbase_servicecheck_file}")
+ smokeverifycmd = format("{smokeuser_kinit_cmd} {exec_tmp_dir}/hbaseSmokeVerify.sh {hbase_conf_dir} {service_check_data} {hbase_cmd}")
Execute( servicecheckcmd,
tries = 3,
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
index d22d901..c9a3828 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/files/checkForFormat.sh
@@ -24,6 +24,8 @@ export hdfs_user=$1
shift
export conf_dir=$1
shift
+export bin_dir=$1
+shift
export old_mark_dir=$1
shift
export mark_dir=$1
@@ -56,7 +58,7 @@ if [[ ! -d $mark_dir ]] ; then
done
if [[ $EXIT_CODE == 0 ]] ; then
- su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+ su - ${hdfs_user} -c "export PATH=$PATH:${bin_dir} ; yes Y | hadoop --config ${conf_dir} ${command}"
else
echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
fi
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
index c4b48c6..68cf4fd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/hdfs_namenode.py
@@ -45,11 +45,11 @@ def namenode(action=None, do_format=True):
create_log_dir=True
)
if params.dfs_ha_enabled:
- dfs_check_nn_status_cmd = format("su - {hdfs_user} -c 'hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
+ dfs_check_nn_status_cmd = format("su - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hdfs --config {hadoop_conf_dir} haadmin -getServiceState {namenode_id} | grep active > /dev/null'")
else:
dfs_check_nn_status_cmd = None
- namenode_safe_mode_off = format("su - {hdfs_user} -c 'hadoop dfsadmin -safemode get' | grep 'Safe mode is OFF'")
+ namenode_safe_mode_off = format("su - {hdfs_user} -c 'export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} dfsadmin -safemode get' | grep 'Safe mode is OFF'")
if params.security_enabled:
Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
@@ -110,14 +110,16 @@ def format_namenode(force=None):
if not params.dfs_ha_enabled:
if force:
ExecuteHadoop('namenode -format',
- kinit_override=True)
+ kinit_override=True,
+ bin_dir=params.hadoop_bin_dir,
+ conf_dir=params.hadoop_conf_dir)
else:
File(format("{tmp_dir}/checkForFormat.sh"),
content=StaticFile("checkForFormat.sh"),
mode=0755)
Execute(format(
- "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {old_mark_dir} "
- "{mark_dir} {dfs_name_dir}"),
+ "{tmp_dir}/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} "
+ "{hadoop_bin_dir} {old_mark_dir} {mark_dir} {dfs_name_dir}"),
not_if=format("test -d {old_mark_dir} || test -d {mark_dir}"),
path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
)
@@ -154,4 +156,5 @@ def decommission():
ExecuteHadoop(nn_refresh_cmd,
user=hdfs_user,
conf_dir=conf_dir,
- kinit_override=True)
+ kinit_override=True,
+ bin_dir=params.hadoop_bin_dir)
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
index 8dae3eb..a0b07aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
@@ -88,7 +88,7 @@ class NameNode(Script):
def startRebalancingProcess(threshold):
- rebalanceCommand = format('hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
+ rebalanceCommand = format('export PATH=$PATH:{hadoop_bin_dir} ; hadoop --config {hadoop_conf_dir} balancer -threshold {threshold}')
return ['su','-',params.hdfs_user,'-c', rebalanceCommand]
command = startRebalancingProcess(threshold)
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
index 47ee8ca..60198c7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
@@ -24,6 +24,28 @@ import os
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ hadoop_conf_empty_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf.empty")
+ mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+ hadoop_libexec_dir = format("/usr/hdp/{rpm_version}/hadoop/libexec")
+ hadoop_bin = format("/usr/hdp/{rpm_version}/hadoop/sbin")
+ hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+ limits_conf_dir = format("/usr/hdp/{rpm_version}/etc/security/limits.d")
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
+ mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+ hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+ hadoop_bin = "/usr/lib/hadoop/sbin"
+ hadoop_bin_dir = "/usr/bin"
+ limits_conf_dir = "/etc/security/limits.d"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
ulimit_cmd = "ulimit -c unlimited; "
#security params
@@ -100,9 +122,7 @@ proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
nagios_group = config['configurations']['nagios-env']['nagios_group']
#hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
@@ -110,8 +130,6 @@ hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger'
dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
@@ -171,11 +189,10 @@ HdfsDirectory = functools.partial(
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local
+ kinit_path_local = kinit_path_local,
+ bin_dir = hadoop_bin_dir
)
-limits_conf_dir = "/etc/security/limits.d"
-
io_compression_codecs = config['configurations']['core-site']['io.compression.codecs']
if not "com.hadoop.compression.lzo" in io_compression_codecs:
exclude_packages = ["lzo", "hadoop-lzo", "hadoop-lzo-native", "liblzo2-2"]
@@ -184,8 +201,6 @@ else:
name_node_params = default("/commandParams/namenode", None)
#hadoop params
-hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-
hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
#hadoop-env.sh
@@ -209,5 +224,4 @@ ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
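execute_path defined above (the agent's PATH plus hadoop_bin_dir) is what the service scripts pass as the PATH environment for plain Execute calls; a minimal sketch, with an illustrative command (the pattern matches the hive_service.py change further down):

    import os
    from resource_management import *

    hadoop_bin_dir = "/usr/bin"  # or /usr/hdp/<rpm_version>/hadoop/bin
    execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir

    Execute('hdfs --config /etc/hadoop/conf dfsadmin -safemode get',
            user='hdfs',
            environment={'PATH': execute_path})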
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
index 66f2ae1..18f58bd 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/service_check.py
@@ -31,13 +31,14 @@ class HdfsServiceCheck(Script):
safemode_command = "dfsadmin -safemode get | grep OFF"
- create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod 777 {dir}")
- test_dir_exists = format("hadoop fs -test -e {dir}")
+ create_dir_cmd = format("fs -mkdir {dir}")
+ chmod_command = format("fs -chmod 777 {dir}")
+ test_dir_exists = format("hadoop --config {hadoop_conf_dir} fs -test -e {dir}")
cleanup_cmd = format("fs -rm {tmp_file}")
#cleanup put below to handle retries; if retrying there will be a stale file
#that needs cleanup; exit code is fn of second command
create_file_cmd = format(
- "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+ "{cleanup_cmd}; hadoop --config {hadoop_conf_dir} fs -put /etc/passwd {tmp_file}")
test_cmd = format("fs -test -e {tmp_file}")
if params.security_enabled:
Execute(format(
@@ -48,7 +49,8 @@ class HdfsServiceCheck(Script):
logoutput=True,
conf_dir=params.hadoop_conf_dir,
try_sleep=3,
- tries=20
+ tries=20,
+ bin_dir=params.hadoop_bin_dir
)
ExecuteHadoop(create_dir_cmd,
user=params.smoke_user,
@@ -56,21 +58,32 @@ class HdfsServiceCheck(Script):
not_if=test_dir_exists,
conf_dir=params.hadoop_conf_dir,
try_sleep=3,
- tries=5
+ tries=5,
+ bin_dir=params.hadoop_bin_dir
+ )
+ ExecuteHadoop(chmod_command,
+ user=params.smoke_user,
+ logoutput=True,
+ conf_dir=params.hadoop_conf_dir,
+ try_sleep=3,
+ tries=5,
+ bin_dir=params.hadoop_bin_dir
)
ExecuteHadoop(create_file_cmd,
user=params.smoke_user,
logoutput=True,
conf_dir=params.hadoop_conf_dir,
try_sleep=3,
- tries=5
+ tries=5,
+ bin_dir=params.hadoop_bin_dir
)
ExecuteHadoop(test_cmd,
user=params.smoke_user,
logoutput=True,
conf_dir=params.hadoop_conf_dir,
try_sleep=3,
- tries=5
+ tries=5,
+ bin_dir=params.hadoop_bin_dir
)
if params.has_journalnode_hosts:
journalnode_port = params.journalnode_port
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
index 53a62ce..eb05481 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat.py
@@ -25,6 +25,12 @@ import sys
def hcat():
import params
+ Directory(params.hive_conf_dir,
+ owner=params.hcat_user,
+ group=params.user_group,
+ )
+
+
Directory(params.hcat_conf_dir,
owner=params.hcat_user,
group=params.user_group,
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
index ec8faa9..ede7e27 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hcat_service_check.py
@@ -45,6 +45,7 @@ def hcat_service_check():
user=params.smokeuser,
try_sleep=5,
path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+ environment = {'PATH' : params.execute_path},
logoutput=True)
if params.security_enabled:
@@ -55,7 +56,8 @@ def hcat_service_check():
security_enabled=params.security_enabled,
kinit_path_local=params.kinit_path_local,
keytab=params.hdfs_user_keytab,
- principal=params.hdfs_principal_name
+ principal=params.hdfs_principal_name,
+ bin_dir=params.hive_bin
)
else:
ExecuteHadoop(test_cmd,
@@ -64,7 +66,8 @@ def hcat_service_check():
conf_dir=params.hadoop_conf_dir,
security_enabled=params.security_enabled,
kinit_path_local=params.kinit_path_local,
- keytab=params.hdfs_user_keytab
+ keytab=params.hdfs_user_keytab,
+ bin_dir=params.hive_bin
)
cleanup_cmd = format("{kinit_cmd} {tmp_dir}/hcatSmoke.sh hcatsmoke{unique} cleanup")
@@ -72,6 +75,7 @@ def hcat_service_check():
Execute(cleanup_cmd,
tries=3,
user=params.smokeuser,
+ environment = {'PATH' : params.execute_path },
try_sleep=5,
path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
logoutput=True
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
index 0b7fcb4..e6e5eb8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
@@ -188,6 +188,7 @@ def jdbc_connector():
Execute(cmd,
not_if=format("test -f {target}"),
creates=params.target,
+ environment= {'PATH' : params.execute_path },
path=["/bin", "/usr/bin/"])
elif params.hive_jdbc_driver == "org.postgresql.Driver":
cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
@@ -195,6 +196,7 @@ def jdbc_connector():
Execute(cmd,
not_if=format("test -f {target}"),
creates=params.target,
+ environment= {'PATH' : params.execute_path },
path=["/bin", "usr/bin/"])
elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
index 8507816..d88d0b0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive_service.py
@@ -49,6 +49,7 @@ def hive_service(
Execute(demon_cmd,
user=params.hive_user,
+ environment= {'PATH' : params.execute_path, 'HADOOP_HOME' : params.hadoop_home },
not_if=process_id_exists
)
@@ -103,8 +104,10 @@ def hive_service(
def check_fs_root():
import params
fs_root_url = format("{fs_root}{hive_apps_whs_dir}")
- cmd = "/usr/lib/hive/bin/metatool -listFSRoot 2>/dev/null | grep hdfs://"
+ cmd = format("metatool -listFSRoot 2>/dev/null | grep hdfs://")
code, out = call(cmd, user=params.hive_user)
if code == 0 and fs_root_url.strip() != out.strip():
- cmd = format("/usr/lib/hive/bin/metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
- Execute(cmd, user=params.hive_user)
\ No newline at end of file
+ cmd = format("metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
+ Execute(cmd,
+ environment= {'PATH' : params.execute_path },
+ user=params.hive_user)
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py
index b6d542d..3548de7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/install_jars.py
@@ -69,7 +69,8 @@ def install_tez_jars():
owner=params.tez_user,
dest_dir=app_dir_path,
kinnit_if_needed=kinit_if_needed,
- hdfs_user=params.hdfs_user
+ hdfs_user=params.hdfs_user,
+ hadoop_conf_dir=params.hadoop_conf_dir
)
pass
@@ -79,7 +80,8 @@ def install_tez_jars():
owner=params.tez_user,
dest_dir=lib_dir_path,
kinnit_if_needed=kinit_if_needed,
- hdfs_user=params.hdfs_user
+ hdfs_user=params.hdfs_user,
+ hadoop_conf_dir=params.hadoop_conf_dir
)
pass
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
index b1a4a49..a38c12a 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
@@ -26,6 +26,53 @@ import os
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+hdp_stack_version = config['hostLevelParams']['stack_version']
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+ hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+ hive_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf')
+ hive_client_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf')
+ hive_server_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf.server')
+ hive_bin = format('/usr/hdp/{rpm_version}/hive/bin')
+ hive_lib = format('/usr/hdp/{rpm_version}/hive/lib')
+ tez_local_api_jars = format('/usr/hdp/{rpm_version}/tez/tez*.jar')
+ tez_local_lib_jars = format('/usr/hdp/{rpm_version}/tez/lib/*.jar')
+
+ if str(hdp_stack_version).startswith('2.0'):
+ hcat_conf_dir = format('/usr/hdp/{rpm_version}/etc/hcatalog/conf')
+ hcat_lib = format('/usr/hdp/{rpm_version}/hive/hcatalog/share/hcatalog')
+ # for newer versions
+ else:
+ hcat_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive-hcatalog/conf')
+ hcat_lib = format('/usr/hdp/{rpm_version}/hive/hive-hcatalog/share/hcatalog')
+
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ hadoop_bin_dir = "/usr/bin"
+ hadoop_home = '/usr'
+ hive_conf_dir = "/etc/hive/conf"
+ hive_bin = '/usr/lib/hive/bin'
+ hive_lib = '/usr/lib/hive/lib/'
+ hive_client_conf_dir = "/etc/hive/conf"
+ hive_server_conf_dir = '/etc/hive/conf.server'
+ tez_local_api_jars = '/usr/lib/tez/tez*.jar'
+ tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
+
+ if str(hdp_stack_version).startswith('2.0'):
+ hcat_conf_dir = '/etc/hcatalog/conf'
+ hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+ # for newer versions
+ else:
+ hcat_conf_dir = '/etc/hive-hcatalog/conf'
+ hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+
+execute_path = os.environ['PATH'] + os.pathsep + hive_bin
hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
@@ -34,7 +81,6 @@ hive_metastore_db_type = config['configurations']['hive-env']['hive_database_typ
#users
hive_user = config['configurations']['hive-env']['hive_user']
-hive_lib = '/usr/lib/hive/lib/'
#JDBC driver jar name
hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
if hive_jdbc_driver == "com.mysql.jdbc.Driver":
@@ -51,11 +97,9 @@ check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
#common
-hdp_stack_version = config['hostLevelParams']['stack_version']
hive_metastore_port = get_port_from_url(config['configurations']['hive-site']['hive.metastore.uris']) #"9083"
hive_var_lib = '/var/lib/hive'
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-hive_bin = '/usr/lib/hive/bin'
hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
@@ -77,8 +121,6 @@ hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
hive_pid_dir = status_params.hive_pid_dir
hive_pid = status_params.hive_pid
#Default conf dir for client
-hive_client_conf_dir = "/etc/hive/conf"
-hive_server_conf_dir = "/etc/hive/conf.server"
hive_conf_dirs_list = [hive_server_conf_dir, hive_client_conf_dir]
if 'role' in config and config['role'] in ["HIVE_SERVER", "HIVE_METASTORE"]:
@@ -92,8 +134,6 @@ hive_database_name = config['configurations']['hive-env']['hive_database_name']
#Starting hiveserver2
start_hiveserver2_script = 'startHiveserver2.sh.j2'
-hadoop_home = '/usr'
-
##Starting metastore
start_metastore_script = 'startMetastore.sh'
hive_metastore_pid = status_params.hive_metastore_pid
@@ -133,15 +173,6 @@ else:
########## HCAT
-if str(hdp_stack_version).startswith('2.0'):
- hcat_conf_dir = '/etc/hcatalog/conf'
- hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-# for newer versions
-else:
- hcat_conf_dir = '/etc/hive-hcatalog/conf'
- hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
-
-
hcat_dbroot = hcat_lib
hcat_user = config['configurations']['hive-env']['hcat_user']
@@ -150,8 +181,6 @@ webhcat_user = config['configurations']['hive-env']['webhcat_user']
hcat_pid_dir = status_params.hcat_pid_dir
hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-hadoop_conf_dir = '/etc/hadoop/conf'
-
#hive-log4j.properties.template
if (('hive-log4j' in config['configurations']) and ('content' in config['configurations']['hive-log4j'])):
log4j_props = config['configurations']['hive-log4j']['content']
@@ -172,7 +201,6 @@ hive_hdfs_user_mode = 0700
hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"]
#for create_hdfs_directory
hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -180,8 +208,6 @@ kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/
# Tez libraries
tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
-tez_local_api_jars = '/usr/lib/tez/tez*.jar'
-tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
tez_user = config['configurations']['tez-env']['tez_user']
if System.get_instance().os_family == "ubuntu":
@@ -205,13 +231,12 @@ else:
import functools
#create partial functions with common arguments for every HdfsDirectory call
#to create hdfs directory we need to call params.HdfsDirectory in code
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
HdfsDirectory = functools.partial(
HdfsDirectory,
conf_dir=hadoop_conf_dir,
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local
+ kinit_path_local = kinit_path_local,
+ bin_dir = hadoop_bin_dir
)
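Since execute_path here appends hive_bin, the bare metatool invocation in check_fs_root (hive_service.py above) now resolves against whichever bin directory the stack selected; sketched with an assumed version string:

    import os

    rpm_version = '2.2.0.0'  # illustrative only
    hive_bin = '/usr/hdp/%s/hive/bin' % rpm_version
    execute_path = os.environ['PATH'] + os.pathsep + hive_bin
    # A shell spawned with this PATH finds 'metatool' at
    # /usr/hdp/2.2.0.0/hive/bin/metatool instead of the previously
    # hard-coded /usr/lib/hive/bin/metatool.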
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2
index a8fe21c..3ddf50f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/startHiveserver2.sh.j2
@@ -25,5 +25,5 @@ HIVE_SERVER2_OPTS=" -hiveconf hive.log.file=hiveserver2.log -hiveconf hive.log.d
HIVE_SERVER2_OPTS="${HIVE_SERVER2_OPTS} -hiveconf hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator -hiveconf hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory "
{% endif %}
-HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
+HIVE_CONF_DIR=$4 {{hive_bin}}/hiveserver2 -hiveconf hive.metastore.uris=" " ${HIVE_SERVER2_OPTS} > $1 2> $2 &
echo $!|cat>$3
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
index fc47a70..9631f0d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
@@ -122,7 +122,7 @@ export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
# The base URL for callback URLs to Oozie
#
# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
+export JAVA_LIBRARY_PATH={{hadoop_lib_home}}/native/Linux-amd64-64
</value>
</property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
index 0a80d0f..6d43880 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/files/oozieSmoke2.sh
@@ -93,10 +93,10 @@ else
kinitcmd=""
fi
-su - ${smoke_test_user} -c "hdfs dfs -rm -r examples"
-su - ${smoke_test_user} -c "hdfs dfs -rm -r input-data"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+su - ${smoke_test_user} -c "hdfs --config ${hadoop_conf_dir} dfs -rm -r examples"
+su - ${smoke_test_user} -c "hdfs --config ${hadoop_conf_dir} dfs -rm -r input-data"
+su - ${smoke_test_user} -c "hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su - ${smoke_test_user} -c "hdfs --config ${hadoop_conf_dir} dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties -run"
echo $cmd
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
index 78661b0..bbbedbe 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie_service.py
@@ -37,7 +37,7 @@ def oozie_service(action = 'start'): # 'start' or 'stop'
db_connection_check_command = None
cmd1 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
- cmd2 = format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+ cmd2 = format("{kinit_if_needed} {put_shared_lib_to_hdfs_cmd} ; hadoop --config {hadoop_conf_dir} dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
if not os.path.isfile(params.jdbc_driver_jar) and params.jdbc_driver_name == "org.postgresql.Driver":
print "ERROR: jdbc file " + params.jdbc_driver_jar + " is unavailable. Please, follow next steps:\n" \
@@ -58,7 +58,7 @@ def oozie_service(action = 'start'): # 'start' or 'stop'
Execute( cmd2,
user = params.oozie_user,
- not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
+ not_if = format("{kinit_if_needed} hadoop --config {hadoop_conf_dir} dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
)
Execute( start_cmd,
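As in oozieSmoke2.sh above, the hadoop/hdfs invocations now carry an explicit
--config so the CLI reads the versioned configuration directory rather than
whatever /etc/hadoop/conf happens to hold. A sketch of the interpolation, with
str.format standing in for resource_management's scope-aware format():

hadoop_conf_dir = "/usr/hdp/2.2.0.0/etc/hadoop/conf"  # assumed versioned value
oozie_hdfs_user_dir = "/user/oozie"
cmd2 = "hadoop --config {hadoop_conf_dir} dfs -chmod -R 755 {oozie_hdfs_user_dir}/share".format(
    hadoop_conf_dir=hadoop_conf_dir, oozie_hdfs_user_dir=oozie_hdfs_user_dir)
print(cmd2)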
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
index a484c0e..ac26ede 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py
@@ -25,15 +25,28 @@ import status_params
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+ hadoop_lib_home = format("/usr/hdp/{rpm_version}/hadoop/lib")
+ mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ hadoop_bin_dir = "/usr/bin"
+ hadoop_lib_home = "/usr/lib/hadoop/lib"
+ mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+
oozie_user = config['configurations']['oozie-env']['oozie_user']
smokeuser = config['configurations']['cluster-env']['smokeuser']
conf_dir = "/etc/oozie/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
user_group = config['configurations']['cluster-env']['user_group']
jdk_location = config['hostLevelParams']['jdk_location']
check_db_connection_jar_name = "DBConnectionVerification.jar"
check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-hadoop_prefix = "/usr"
oozie_tmp_dir = "/var/tmp/oozie"
oozie_hdfs_user_dir = format("/user/{oozie_user}")
oozie_pid_dir = status_params.oozie_pid_dir
@@ -53,7 +66,6 @@ oozie_keytab = config['configurations']['oozie-env']['oozie_keytab']
oozie_env_sh_template = config['configurations']['oozie-env']['content']
oracle_driver_jar_name = "ojdbc6.jar"
-java_share_dir = "/usr/share/java"
java_home = config['hostLevelParams']['java_home']
oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
@@ -71,7 +83,7 @@ oozie_shared_lib = "/usr/lib/oozie/share"
fs_root = config['configurations']['core-site']['fs.defaultFS']
if str(hdp_stack_version).startswith('2.0') or str(hdp_stack_version).startswith('2.1'):
- put_shared_lib_to_hdfs_cmd = format("hadoop dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
+ put_shared_lib_to_hdfs_cmd = format("hadoop --config {hadoop_conf_dir} dfs -put {oozie_shared_lib} {oozie_hdfs_user_dir}")
# for newer versions
else:
put_shared_lib_to_hdfs_cmd = format("{oozie_setup_sh} sharelib create -fs {fs_root} -locallib {oozie_shared_lib}")
@@ -103,7 +115,6 @@ oozie_hdfs_user_dir = format("/user/{oozie_user}")
oozie_hdfs_user_mode = 0775
#for create_hdfs_directory
hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -117,5 +128,6 @@ HdfsDirectory = functools.partial(
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local
+ kinit_path_local = kinit_path_local,
+ bin_dir = hadoop_bin_dir
)
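The rpm_version switch above is the heart of the change and recurs in each
service's params.py: when hadoop-env supplies rpm_version, every path is
rooted under /usr/hdp/<version>; otherwise the legacy HDP 2.0/2.1 locations
apply. A self-contained sketch, with default() reduced to a plain dict lookup:

configurations = {}  # assumed empty, i.e. no rpm_version configured

def default(key, fallback):
    # stand-in for resource_management's default()
    return configurations.get(key, fallback)

rpm_version = default("/configurations/hadoop-env/rpm_version", None)
if rpm_version is not None:
    hadoop_conf_dir = "/usr/hdp/%s/etc/hadoop/conf" % rpm_version
    hadoop_bin_dir = "/usr/hdp/%s/hadoop/bin" % rpm_version
else:
    hadoop_conf_dir = "/etc/hadoop/conf"
    hadoop_bin_dir = "/usr/bin"
print(hadoop_conf_dir + " " + hadoop_bin_dir)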
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
index 1b522b8..d1f8b75 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/params.py
@@ -25,8 +25,23 @@ from resource_management import *
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
-pig_conf_dir = "/etc/pig/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+ hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+ pig_conf_dir = format('/usr/hdp/{rpm_version}/etc/pig/conf')
+ pig_bin_dir = format('/usr/hdp/{rpm_version}/pig/bin')
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ hadoop_bin_dir = "/usr/bin"
+ hadoop_home = '/usr'
+ pig_conf_dir = "/etc/pig/conf"
+ pig_bin_dir = ""
+
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
smokeuser = config['configurations']['cluster-env']['smokeuser']
@@ -38,7 +53,6 @@ pig_env_sh_template = config['configurations']['pig-env']['content']
# not supporting 32 bit jdk.
java64_home = config['hostLevelParams']['java_home']
-hadoop_home = "/usr"
pig_properties = config['configurations']['pig-properties']['content']
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
index 8431b6d..7619bd6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/PIG/package/scripts/service_check.py
@@ -31,7 +31,7 @@ class PigServiceCheck(Script):
cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
#cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
- create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
+ create_file_cmd = format("{cleanup_cmd}; hadoop --config {hadoop_conf_dir} dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
test_cmd = format("fs -test -e {output_file}")
ExecuteHadoop( create_file_cmd,
@@ -42,7 +42,8 @@ class PigServiceCheck(Script):
# for kinit run
keytab = params.smoke_user_keytab,
security_enabled = params.security_enabled,
- kinit_path_local = params.kinit_path_local
+ kinit_path_local = params.kinit_path_local,
+ bin_dir = params.hadoop_bin_dir
)
File( format("{tmp_dir}/pigSmoke.sh"),
@@ -53,13 +54,14 @@ class PigServiceCheck(Script):
Execute( format("pig {tmp_dir}/pigSmoke.sh"),
tries = 3,
try_sleep = 5,
- path = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+ path = format('{pig_bin_dir}:/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'),
user = params.smokeuser
)
ExecuteHadoop( test_cmd,
user = params.smokeuser,
- conf_dir = params.hadoop_conf_dir
+ conf_dir = params.hadoop_conf_dir,
+ bin_dir = params.hadoop_bin_dir
)
if __name__ == "__main__":
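One subtlety in the Pig smoke test: with the legacy layout pig_bin_dir is "",
so format('{pig_bin_dir}:/usr/sbin:...') yields a leading ':' in PATH, which
POSIX shells read as the current directory. A defensive sketch of the same
prefixing that skips the empty entry:

def build_pig_path(pig_bin_dir):
    standard = "/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin"
    # prepend only when non-empty, avoiding a ':'-induced current-dir entry
    return pig_bin_dir + ":" + standard if pig_bin_dir else standard

print(build_pig_path(""))                          # legacy layout
print(build_pig_path("/usr/hdp/2.2.0.0/pig/bin"))  # versioned (illustrative)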
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
index 144a587..9170fdc 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py
@@ -21,6 +21,15 @@ from resource_management import *
config = Script.get_config()
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ zoo_conf_dir = format('/usr/hdp/{rpm_version}/etc/zookeeper')
+else:
+ zoo_conf_dir = "/etc/zookeeper"
+
security_enabled = config['configurations']['cluster-env']['security_enabled']
smokeuser = config['configurations']['cluster-env']['smokeuser']
user_group = config['configurations']['cluster-env']['user_group']
@@ -29,7 +38,6 @@ sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
sqoop_conf_dir = "/usr/lib/sqoop/conf"
hbase_home = "/usr"
hive_home = "/usr"
-zoo_conf_dir = "/etc/zookeeper"
sqoop_lib = "/usr/lib/sqoop/lib"
sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-env.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-env.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-env.xml
index 304bbb7..1dba691 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-env.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-env.xml
@@ -47,7 +47,7 @@ CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
#HCAT_PREFIX=hive_prefix
# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop
+export HADOOP_HOME={{hadoop_home}}
</value>
</property>
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
index a7959f0..f37ac27 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py
@@ -26,16 +26,36 @@ import status_params
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
-hcat_user = config['configurations']['hive-env']['hcat_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
-if str(config['hostLevelParams']['stack_version']).startswith('2.0'):
- config_dir = '/etc/hcatalog/conf'
- webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
-# for newer versions
+#hadoop params
+hdp_stack_version = config['hostLevelParams']['stack_version']
+if rpm_version is not None:
+ hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+ hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+ hadoop_streeming_jars = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/hadoop-streaming-*.jar")
+ if str(hdp_stack_version).startswith('2.0'):
+ config_dir = format('/usr/hdp/{rpm_version}/etc/hcatalog/conf')
+ webhcat_bin_dir = format('/usr/hdp/{rpm_version}/hive/hcatalog/sbin')
+ # for newer versions
+ else:
+ config_dir = format('/usr/hdp/{rpm_version}/etc/hive-webhcat/conf')
+ webhcat_bin_dir = format('/usr/hdp/{rpm_version}/hive/hive-hcatalog/sbin')
else:
- config_dir = '/etc/hive-webhcat/conf'
- webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+ hadoop_bin_dir = "/usr/bin"
+ hadoop_home = '/usr'
+ hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
+ if str(hdp_stack_version).startswith('2.0'):
+ config_dir = '/etc/hcatalog/conf'
+ webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
+ # for newer versions
+ else:
+ config_dir = '/etc/hive-webhcat/conf'
+ webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
@@ -46,7 +66,6 @@ pid_file = status_params.pid_file
hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-hadoop_home = '/usr'
user_group = config['configurations']['cluster-env']['user_group']
webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
@@ -64,7 +83,6 @@ webhcat_hdfs_user_mode = 0755
webhcat_apps_dir = "/apps/webhcat"
#for create_hdfs_directory
hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
security_param = "true" if security_enabled else "false"
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
@@ -79,5 +97,6 @@ HdfsDirectory = functools.partial(
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local
+ kinit_path_local = kinit_path_local,
+ bin_dir = hadoop_bin_dir
)
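WebHCat needs two levels of branching: the package layout (rpm_version) and
the stack line, since the hcatalog directories moved after HDP 2.0. A sketch
mirroring the values above, with an illustrative version string:

def webhcat_dirs(hdp_stack_version, rpm_version=None):
    if rpm_version is not None:
        root = "/usr/hdp/" + rpm_version
        if str(hdp_stack_version).startswith('2.0'):
            return root + "/etc/hcatalog/conf", root + "/hive/hcatalog/sbin"
        return root + "/etc/hive-webhcat/conf", root + "/hive/hive-hcatalog/sbin"
    if str(hdp_stack_version).startswith('2.0'):
        return '/etc/hcatalog/conf', '/usr/lib/hcatalog/sbin'
    return '/etc/hive-webhcat/conf', '/usr/lib/hive-hcatalog/sbin'

print(webhcat_dirs('2.0.6'))
print(webhcat_dirs('2.2', rpm_version='2.2.0.0'))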
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py
index 3092735..c56ae5f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py
@@ -84,12 +84,13 @@ def webhcat():
path='/bin'
)
- CopyFromLocal('/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar',
+ CopyFromLocal(params.hadoop_streeming_jars,
owner=params.webhcat_user,
mode=0755,
dest_dir=params.webhcat_apps_dir,
kinnit_if_needed=kinit_if_needed,
- hdfs_user=params.hdfs_user
+ hdfs_user=params.hdfs_user,
+ hadoop_conf_dir=params.hadoop_conf_dir
)
CopyFromLocal('/usr/share/HDP-webhcat/pig.tar.gz',
@@ -97,7 +98,8 @@ def webhcat():
mode=0755,
dest_dir=params.webhcat_apps_dir,
kinnit_if_needed=kinit_if_needed,
- hdfs_user=params.hdfs_user
+ hdfs_user=params.hdfs_user,
+ hadoop_conf_dir=params.hadoop_conf_dir
)
CopyFromLocal('/usr/share/HDP-webhcat/hive.tar.gz',
@@ -105,5 +107,6 @@ def webhcat():
mode=0755,
dest_dir=params.webhcat_apps_dir,
kinnit_if_needed=kinit_if_needed,
- hdfs_user=params.hdfs_user
+ hdfs_user=params.hdfs_user,
+ hadoop_conf_dir=params.hadoop_conf_dir
)
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
index 313ed94..f8d670e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/params.py
@@ -18,6 +18,7 @@ limitations under the License.
Ambari Agent
"""
+import os
from resource_management import *
import status_params
@@ -26,7 +27,34 @@ import status_params
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
-config_dir = "/etc/hadoop/conf"
+#RPM versioning support
+rpm_version = default("/configurations/hadoop-env/rpm_version", None)
+
+#hadoop params
+if rpm_version is not None:
+ hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
+ hadoop_libexec_dir = format("/usr/hdp/{rpm_version}/hadoop/libexec")
+ hadoop_bin = format("/usr/hdp/{rpm_version}/hadoop/sbin")
+ hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+ limits_conf_dir = format("/usr/hdp/{rpm_version}/etc/security/limits.d")
+ hadoop_yarn_home = format('/usr/hdp/{rpm_version}/hadoop-yarn')
+ hadoop_mapred2_jar_location = format('/usr/hdp/{rpm_version}/hadoop-mapreduce')
+ mapred_bin = format('/usr/hdp/{rpm_version}/hadoop-mapreduce/sbin')
+ yarn_bin = format('/usr/hdp/{rpm_version}/hadoop-yarn/sbin')
+ yarn_container_bin = format('/usr/hdp/{rpm_version}/hadoop-yarn/bin')
+else:
+ hadoop_conf_dir = "/etc/hadoop/conf"
+ hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+ hadoop_bin = "/usr/lib/hadoop/sbin"
+ hadoop_bin_dir = "/usr/bin"
+ limits_conf_dir = "/etc/security/limits.d"
+ hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+ hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+ mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+ yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+ yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
+
+execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
ulimit_cmd = "ulimit -c unlimited;"
@@ -49,8 +77,6 @@ rm_nodes_exclude_path = default("/configurations/yarn-site/yarn.resourcemanager.
java64_home = config['hostLevelParams']['java_home']
hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
-hadoop_yarn_home = '/usr/lib/hadoop-yarn'
yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize']
resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize']
nodemanager_heapsize = config['configurations']['yarn-env']['nodemanager_heapsize']
@@ -77,8 +103,6 @@ hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory
nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
-
-hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
@@ -90,13 +114,7 @@ yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
-mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-
user_group = config['configurations']['cluster-env']['user_group']
-limits_conf_dir = "/etc/security/limits.d"
-hadoop_conf_dir = "/etc/hadoop/conf"
-yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
#exclude file
exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
@@ -128,7 +146,6 @@ jobhistory_heapsize = default("/configurations/mapred-env/jobhistory_heapsize",
#for create_hdfs_directory
hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
@@ -142,11 +159,11 @@ HdfsDirectory = functools.partial(
hdfs_user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
- kinit_path_local = kinit_path_local
+ kinit_path_local = kinit_path_local,
+ bin_dir = hadoop_bin_dir
)
update_exclude_file_only = config['commandParams']['update_exclude_file_only']
-hadoop_bin = "/usr/lib/hadoop/sbin"
mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
#taskcontroller.cfg
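execute_path extends, rather than replaces, the agent's inherited PATH, so
existing system tools keep resolving while the versioned hadoop/yarn binaries
become reachable wherever execute_path is handed to an Execute environment. A
two-line sketch with an illustrative versioned bin dir:

import os

hadoop_bin_dir = "/usr/hdp/2.2.0.0/hadoop/bin"  # illustrative value
execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
print(execute_path)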
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py
index af678d0..4d40d68 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/resourcemanager.py
@@ -78,10 +78,10 @@ class Resourcemanager(Script):
env.set_params(params)
rm_kinit_cmd = params.rm_kinit_cmd
yarn_user = params.yarn_user
- conf_dir = params.config_dir
+ conf_dir = params.hadoop_conf_dir
user_group = params.user_group
- yarn_refresh_cmd = format("{rm_kinit_cmd} /usr/bin/yarn --config {conf_dir} rmadmin -refreshNodes")
+ yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
@@ -91,6 +91,7 @@ class Resourcemanager(Script):
if params.update_exclude_file_only == False:
Execute(yarn_refresh_cmd,
+ environment= {'PATH' : params.execute_path },
user=yarn_user)
pass
pass
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
index 42a7138..466f637 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service.py
@@ -35,7 +35,7 @@ def service(componentName, action='start', serviceName='yarn'):
pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{componentName}.pid")
usr = params.yarn_user
- cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
+ cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}")
if action == 'start':
daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}")
http://git-wip-us.apache.org/repos/asf/ambari/blob/7d9feb6a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py
index 2ed67ab..7e535a5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/service_check.py
@@ -27,7 +27,7 @@ class ServiceCheck(Script):
import params
env.set_params(params)
- run_yarn_check_cmd = "/usr/bin/yarn node -list"
+ run_yarn_check_cmd = format("yarn --config {hadoop_conf_dir} node -list")
component_type = 'rm'
if params.hadoop_ssl_enabled:
@@ -60,6 +60,7 @@ class ServiceCheck(Script):
)
Execute(run_yarn_check_cmd,
+ environment= {'PATH' : params.execute_path },
user=params.smokeuser
)
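The smoke test likewise drops the absolute /usr/bin/yarn and relies on PATH
resolution seeded from execute_path. A sketch with subprocess standing in for
the Execute resource; paths are illustrative:

import os
import subprocess

env = dict(os.environ)
env['PATH'] = env['PATH'] + os.pathsep + "/usr/hdp/2.2.0.0/hadoop/bin"
# 'yarn' resolves through the extended PATH, as with Execute(..., environment=...)
subprocess.call("yarn --config /etc/hadoop/conf node -list", shell=True, env=env)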