Posted to commits@ambari.apache.org by sm...@apache.org on 2014/01/13 23:08:58 UTC

[1/7] AMBARI-4270. Add decommission support for TaskTracker and modify support for DataNode to match

Updated Branches:
  refs/heads/trunk c86976551 -> 5d3677f74


http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000..0a5fb2b
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+
+from hive import hive
+
+class HiveClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='client')
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HiveClient().execute()
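
The client script above follows the resource_management command-script pattern: the agent invokes the script with a lifecycle command name, and Script.execute() dispatches to the matching method on the subclass. A minimal stand-alone sketch of that dispatch (assumption: the Script class below is a simplified stand-in for illustration, not the real resource_management class):

import sys

class Script(object):
  def execute(self):
    # The agent passes the lifecycle command name (e.g. INSTALL, CONFIGURE);
    # dispatch to the matching lowercase method on the subclass.
    command = sys.argv[1].lower() if len(sys.argv) > 1 else "status"
    getattr(self, command)(env=None)

class EchoClient(Script):
  def install(self, env):
    print "install called"
  def configure(self, env):
    print "configure called"
  def status(self, env):
    # Mirrors ClientComponentHasNoStatus: clients run no daemon to probe.
    raise Exception("client components have no status")

if __name__ == "__main__":
  EchoClient().execute()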

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..c741174
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveMetastore(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='metastore')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'metastore',
+                   action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'metastore',
+                   action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Verify the Hive Metastore process is alive via its pid file
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()
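
status() above delegates to check_process_status, which verifies liveness through the pid file. A rough sketch of what such a check does (assumption: simplified; the real helper raises ComponentIsNotRunning rather than a bare Exception):

import os

def check_process_status(pid_file):
  # Fail if the pid file is missing, unreadable, or names a dead process.
  if not os.path.isfile(pid_file):
    raise Exception("pid file %s not found" % pid_file)
  with open(pid_file) as f:
    pid = int(f.read().strip())
  try:
    os.kill(pid, 0)  # signal 0 probes existence without sending anything
  except OSError:
    raise Exception("process %d is not running" % pid)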

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_server.py
new file mode 100644
index 0000000..3ad81a1
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='hiveserver2')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'hiveserver2',
+                  action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'hiveserver2',
+                  action = 'stop'
+    )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    # Verify the HiveServer2 process is alive via its pid file
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_service.py
new file mode 100644
index 0000000..e8d4e5c
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hive_service(
+    name,
+    action='start'):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format(
+      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format(
+      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{cmd}")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            user=params.hive_user,
+            not_if=no_op_test
+    )
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
+
+  elif action == 'stop':
+    daemon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
+    Execute(daemon_cmd)
+
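
The format() calls above interpolate names such as {hive_pid_dir} from the calling scope (together with the params exposed via env.set_params) rather than from explicit arguments. A simplified sketch of that behavior (assumption: a stand-in for resource_management.format, not its actual implementation):

import inspect

def format(template):
  caller = inspect.currentframe().f_back
  names = dict(caller.f_globals)
  names.update(caller.f_locals)  # locals win over globals
  return template.format(**names)

hive_pid_dir = "/var/run/hive"
hive_metastore_pid = "hive.pid"
print format("{hive_pid_dir}/{hive_metastore_pid}")  # /var/run/hive/hive.pid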

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_server.py
new file mode 100644
index 0000000..5360f99
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from mysql_service import mysql_service
+
+class MysqlServer(Script):
+
+  if System.get_instance().platform == "suse":
+    daemon_name = 'mysql'
+  else:
+    daemon_name = 'mysqld'
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action='start')
+
+    File(params.mysql_adduser_path,
+         mode=0755,
+         content=StaticFile('addMysqlUser.sh')
+    )
+
+    # Pass the command as a tuple so each argument is escaped automatically
+    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
+           params.hive_metastore_user_name, params.hive_metastore_user_passwd, params.mysql_host[0])
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+    mysql_service(daemon_name=self.daemon_name, action='stop')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'stop')
+
+  def status(self, env):
+    mysql_service(daemon_name=self.daemon_name, action = 'status')
+
+if __name__ == "__main__":
+  MysqlServer().execute()
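
The tuple form of cmd above is what the escaping comment refers to: each element reaches the child process as a single argv entry, so values such as passwords need no shell quoting. A small illustration using subprocess (assumption: Ambari's Execute resource treats tuples analogously; the values are made up):

import subprocess

user_passwd = "pa ss;word"  # would need careful quoting in a shell string
cmd = ("echo", "adding user with password", user_passwd)
subprocess.call(cmd)  # the password arrives as exactly one argument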

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
new file mode 100644
index 0000000..4716343
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'):
+
+  logoutput=True
+  if action == 'start':
+    cmd = format('service {daemon_name} start')
+  elif action == 'stop':
+    cmd = format('service {daemon_name} stop')
+  elif action == 'status':
+    cmd = format('service {daemon_name} status')
+    logoutput = False
+  else:
+    cmd = None
+
+  if cmd is not None:
+    Execute(cmd,
+            path="/usr/local/bin/:/bin/:/sbin/",
+            tries=1,
+            logoutput=logoutput)
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/params.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/params.py
new file mode 100644
index 0000000..0cf89be
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/params.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_server_conf_dir = "/etc/hive/conf.server"
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+
+#users
+hive_user = config['configurations']['global']['hive_user']
+hive_lib = '/usr/lib/hive/lib/'
+#JDBC driver jar name
+hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
+if hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc6.jar"
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+#common
+hive_metastore_port = config['configurations']['global']['hive_metastore_port']
+hive_var_lib = '/var/lib/hive'
+hive_server_host = config['clusterHostInfo']['hive_server_host']
+hive_url = format("jdbc:hive2://{hive_server_host}:10000")
+
+smokeuser = config['configurations']['global']['smokeuser']
+smoke_test_sql = "/tmp/hiveserver2.sql"
+smoke_test_path = "/tmp/hiveserver2Smoke.sh"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+#hive_env
+hive_conf_dir = "/etc/hive/conf"
+hive_dbroot = config['configurations']['global']['hive_dbroot']
+hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+
+#hive-site
+hive_database_name = config['configurations']['global']['hive_database_name']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh'
+
+hadoop_home = '/usr'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user =  config['configurations']['global']['hdfs_user']
+user_group = config['configurations']['global']['user_group']
+artifact_dir = "/tmp/HDP-artifacts/"
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+
+start_hiveserver2_path = "/tmp/start_hiveserver2_script"
+start_metastore_path = "/tmp/start_metastore_script"
+
+hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+java64_home = config['hostLevelParams']['java_home']
+
+##### MYSQL
+
+db_name = config['configurations']['global']['hive_database_name']
+mysql_user = "mysql"
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = "/tmp/addMysqlUser.sh"
+
+########## HCAT
+
+hcat_conf_dir = '/etc/hcatalog/conf'
+
+metastore_port = 9933
+hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['global']['hcat_user']
+webhcat_user = config['configurations']['global']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['global']['hcat_log_dir']
+
+hadoop_conf_dir = '/etc/hadoop/conf'
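
Everything in params.py is a plain module-level assignment built from nested lookups into the command JSON returned by Script.get_config(). A minimal sketch of that structure (assumption: the config dict below is an abbreviated, hypothetical stand-in for the real command JSON):

config = {
  'configurations': {
    'global': {'hive_user': 'hive', 'hive_log_dir': '/var/log/hive'},
    'hive-site': {'javax.jdo.option.ConnectionUserName': 'hive'},
  },
  'hostLevelParams': {'java_home': '/usr/jdk64'},
}

hive_user = config['configurations']['global']['hive_user']
hive_metastore_user_name = \
  config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
java64_home = config['hostLevelParams']['java_home']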

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/service_check.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/service_check.py
new file mode 100644
index 0000000..111e8a1
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+from hcat_service_check import hcat_service_check
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
+      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
+      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
+    else:
+      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
+
+    File(params.smoke_test_path,
+         content=StaticFile('hiveserver2Smoke.sh'),
+         mode=0755
+    )
+
+    File(params.smoke_test_sql,
+         content=StaticFile('hiveserver2.sql')
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True,
+            user=params.smokeuser)
+
+    hcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/status_params.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/status_params.py
new file mode 100644
index 0000000..7770975
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/status_params.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid = 'hive-server.pid'
+
+hive_metastore_pid = 'hive.pid'
+
+hcat_pid_dir = config['configurations']['global']['hcat_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/templates/hcat-env.sh.j2
new file mode 100644
index 0000000..2a35240
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/templates/hcat-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HCAT_PID_DIR={{hcat_pid_dir}}/
+HCAT_LOG_DIR={{hcat_log_dir}}/
+HCAT_CONF_DIR={{hcat_conf_dir}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT={{hcat_dbroot}}
+USER={{hcat_user}}
+METASTORE_PORT={{metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/templates/hive-env.sh.j2
new file mode 100644
index 0000000..548262a
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/templates/hive-env.sh.j2
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. Admins should use this file to configure
+# the Hive installation (so that users do not have to set environment variables
+# or command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the Hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# A larger heap size may be required when running queries over a large number of files or partitions.
+# By default, Hive shell scripts use a heap size of 256 MB.  A larger heap size would also be
+# appropriate for the Hive server (HWI etc.).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
+export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml
index 6f02f79..6dfff9d 100644
--- a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/YARN/metainfo.xml
@@ -16,30 +16,156 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-    <version>2.1.0.2.0.6.0</version>
-    <deleted>false</deleted>
-    <components>
-      <component>
-        <name>RESOURCEMANAGER</name>
-        <category>MASTER</category>
-      </component>
-      <component>
-        <name>NODEMANAGER</name>
-        <category>SLAVE</category>
-      </component>
-      <component>
-        <name>YARN_CLIENT</name>
-        <category>CLIENT</category>
-        <deleted>true</deleted>
-      </component>
-      <component>
-        <name>TEZ</name>
-        <category>CLIENT</category>
-      </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-    </configuration-dependencies>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+      <components>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>NODEMANAGER</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-nodemanager</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-proxyserver</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-resourcemanager</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce-historyserver</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
 </metainfo>
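
The DECOMMISSION entry under RESOURCEMANAGER is what ties this metainfo to the decommission support in the commit subject: custom commands declared here become invocable through the server's request API. A hypothetical invocation sketch (assumptions: endpoint shape, payload fields, host names, and credentials are illustrative, not taken from this commit):

import base64, json, urllib2

url = "http://ambari.example.com:8080/api/v1/clusters/c1/requests"
payload = {
  "RequestInfo": {
    "command": "DECOMMISSION",
    "context": "Decommission NodeManager",
    "parameters": {"excluded_hosts": "host1.example.com"},
  },
  "Requests/resource_filters": [
    {"service_name": "YARN", "component_name": "RESOURCEMANAGER"},
  ],
}
request = urllib2.Request(url, json.dumps(payload))
request.add_header("X-Requested-By", "ambari")
request.add_header("Authorization", "Basic " + base64.b64encode("admin:admin"))
urllib2.urlopen(request)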


[6/7] AMBARI-4270. Add decommission support for TaskTracker and modify support for DataNode to match

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
index 0485c4a..d1ad1a9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/internal/HostComponentResourceProvider.java
@@ -17,13 +17,9 @@
  */
 package org.apache.ambari.server.controller.internal;
 
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.Set;
-
+import com.google.inject.assistedinject.Assisted;
+import com.google.inject.assistedinject.AssistedInject;
+import com.google.inject.persist.Transactional;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.ComponentSSLConfiguration;
 import org.apache.ambari.server.controller.AmbariManagementController;
@@ -42,10 +38,12 @@ import org.apache.ambari.server.controller.spi.SystemException;
 import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 
-import com.google.inject.Injector;
-import com.google.inject.assistedinject.Assisted;
-import com.google.inject.assistedinject.AssistedInject;
-import com.google.inject.persist.Transactional;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
 
 /**
  * Resource provider for host component resources.
@@ -72,15 +70,15 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
   protected static final String HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID
       = PropertyHelper.getPropertyId("HostRoles", "desired_stack_id");
   protected static final String HOST_COMPONENT_ACTUAL_CONFIGS_PROPERTY_ID
-    = PropertyHelper.getPropertyId("HostRoles", "actual_configs");
+      = PropertyHelper.getPropertyId("HostRoles", "actual_configs");
   protected static final String HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID
-    = PropertyHelper.getPropertyId("HostRoles", "stale_configs");
-  
+      = PropertyHelper.getPropertyId("HostRoles", "stale_configs");
+  protected static final String HOST_COMPONENT_ADMIN_STATE_PROPERTY_ID
+      = PropertyHelper.getPropertyId("HostRoles", "admin_state");
   //Component name mappings
   private static final Map<String, PropertyProvider> HOST_COMPONENT_PROPERTIES_PROVIDER = new HashMap<String, PropertyProvider>();
-
-  private static final int HOST_COMPONENT_HTTP_PROPERTY_REQUEST_CONNECT_TIMEOUT = 1500;
-  private static final int HOST_COMPONENT_HTTP_PROPERTY_REQUEST_READ_TIMEOUT    = 10000;
+  private static final int HOST_COMPONENT_HTTP_PROPERTY_REQUEST_CONNECT_TIMEOUT = 1500;   //milliseconds
+  private static final int HOST_COMPONENT_HTTP_PROPERTY_REQUEST_READ_TIMEOUT = 10000;  //milliseconds
 
   static {
     ComponentSSLConfiguration configuration = ComponentSSLConfiguration.instance();
@@ -101,7 +99,6 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
   //Parameters from the predicate
   private static final String QUERY_PARAMETERS_RUN_SMOKE_TEST_ID =
       "params/run_smoke_test";
-
   private static Set<String> pkPropertyIds =
       new HashSet<String>(Arrays.asList(new String[]{
           HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
@@ -114,14 +111,14 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
   /**
    * Create a  new resource provider for the given management controller.
    *
-   * @param propertyIds           the property ids
-   * @param keyPropertyIds        the key property ids
-   * @param managementController  the management controller
+   * @param propertyIds          the property ids
+   * @param keyPropertyIds       the key property ids
+   * @param managementController the management controller
    */
   @AssistedInject
   public HostComponentResourceProvider(@Assisted Set<String> propertyIds,
-                                @Assisted Map<Resource.Type, String> keyPropertyIds,
-                                @Assisted AmbariManagementController managementController) {
+                                       @Assisted Map<Resource.Type, String> keyPropertyIds,
+                                       @Assisted AmbariManagementController managementController) {
     super(propertyIds, keyPropertyIds, managementController);
   }
 
@@ -130,9 +127,9 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
   @Override
   public RequestStatus createResources(Request request)
       throws SystemException,
-             UnsupportedPropertyException,
-             ResourceAlreadyExistsException,
-             NoSuchParentResourceException {
+      UnsupportedPropertyException,
+      ResourceAlreadyExistsException,
+      NoSuchParentResourceException {
 
     final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
     for (Map<String, Object> propertyMap : request.getProperties()) {
@@ -163,8 +160,8 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
       requests.add(getRequest(propertyMap));
     }
 
-    Set<Resource> resources    = new HashSet<Resource>();
-    Set<String>   requestedIds = getRequestPropertyIds(request, predicate);
+    Set<Resource> resources = new HashSet<Resource>();
+    Set<String> requestedIds = getRequestPropertyIds(request, predicate);
 
     Set<ServiceComponentHostResponse> responses = getResources(new Command<Set<ServiceComponentHostResponse>>() {
       @Override
@@ -172,15 +169,21 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
         return getManagementController().getHostComponents(requests);
       }
     });
-    
+
     for (ServiceComponentHostResponse response : responses) {
       Resource resource = new ResourceImpl(Resource.Type.HostComponent);
-      setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, response.getClusterName(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, response.getServiceName(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, response.getComponentName(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID, response.getHostname(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID, response.getLiveState(), requestedIds);
-      setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID, response.getDesiredState(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID,
+          response.getClusterName(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID,
+          response.getServiceName(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID,
+          response.getComponentName(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_HOST_NAME_PROPERTY_ID,
+          response.getHostname(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_STATE_PROPERTY_ID,
+          response.getLiveState(), requestedIds);
+      setResourceProperty(resource, HOST_COMPONENT_DESIRED_STATE_PROPERTY_ID,
+          response.getDesiredState(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_STACK_ID_PROPERTY_ID,
           response.getStackVersion(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_DESIRED_STACK_ID_PROPERTY_ID,
@@ -189,15 +192,19 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
           response.getActualConfigs(), requestedIds);
       setResourceProperty(resource, HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID,
           Boolean.valueOf(response.isStaleConfig()), requestedIds);
-      
-      String componentName = (String)resource.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
+      if (response.getAdminState() != null) {
+        setResourceProperty(resource, HOST_COMPONENT_ADMIN_STATE_PROPERTY_ID,
+            response.getAdminState(), requestedIds);
+      }
+
+      String componentName = (String) resource.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
       PropertyProvider propertyProvider = HOST_COMPONENT_PROPERTIES_PROVIDER.get(componentName);
       if (propertyProvider != null) {
         Set<Resource> resourcesToPopulate = new HashSet<Resource>();
         resourcesToPopulate.add(resource);
         propertyProvider.populateResources(resourcesToPopulate, request, predicate);
       }
-      
+
       resources.add(resource);
     }
     return resources;
@@ -205,14 +212,14 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
 
   @Override
   public RequestStatus updateResources(final Request request, Predicate predicate)
-        throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
     final Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
     RequestStatusResponse response = null;
 
     final boolean runSmokeTest = "true".equals(getQueryParameterValue(
         QUERY_PARAMETERS_RUN_SMOKE_TEST_ID, predicate)) ? true : false;
 
-    Iterator<Map<String,Object>> iterator = request.getProperties().iterator();
+    Iterator<Map<String, Object>> iterator = request.getProperties().iterator();
     if (iterator.hasNext()) {
       for (Map<String, Object> propertyMap : getPropertyMaps(request.getProperties().iterator().next(), predicate)) {
         requests.add(getRequest(propertyMap));
@@ -279,8 +286,7 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
   /**
    * Get a component request object from a map of property values.
    *
-   * @param properties  the predicate
-   *
+   * @param properties the predicate
    * @return the component request object
    */
   private ServiceComponentHostRequest getRequest(Map<String, Object> properties) {
@@ -294,7 +300,11 @@ public class HostComponentResourceProvider extends AbstractControllerResourcePro
         (String) properties.get(HOST_COMPONENT_STACK_ID_PROPERTY_ID));
     if (properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID) != null) {
       serviceComponentHostRequest.setStaleConfig(
-          properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID).toString());
+          properties.get(HOST_COMPONENT_STALE_CONFIGS_PROPERTY_ID).toString().toLowerCase());
+    }
+    if (properties.get(HOST_COMPONENT_ADMIN_STATE_PROPERTY_ID) != null) {
+      serviceComponentHostRequest.setAdminState(
+          properties.get(HOST_COMPONENT_ADMIN_STATE_PROPERTY_ID).toString());
     }
 
     return serviceComponentHostRequest;

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
index ea97524..93226c9 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java
@@ -50,7 +50,7 @@ public class ActionMetadata {
 
   private void fillServiceClients() {
     serviceClients.put("hdfs"       , Role.HDFS_CLIENT.toString());
-    serviceClients.put("glusterfs"       , Role.GLUSTERFS_CLIENT.toString());
+    serviceClients.put("glusterfs"  , Role.GLUSTERFS_CLIENT.toString());
     serviceClients.put("hbase"      , Role.HBASE_CLIENT.toString());
     serviceClients.put("mapreduce"  , Role.MAPREDUCE_CLIENT.toString());
     serviceClients.put("zookeeper"  , Role.ZOOKEEPER_CLIENT.toString());
@@ -62,9 +62,8 @@ public class ActionMetadata {
   }
 
   private void fillServiceActions() {
-    serviceActions.put("hdfs"       , Arrays.asList(Role.HDFS_SERVICE_CHECK.toString(),
-                                                    Role.DECOMMISSION_DATANODE.toString()));
-    serviceActions.put("glusterfs"       , Arrays.asList(Role.GLUSTERFS_SERVICE_CHECK.toString()));
+    serviceActions.put("hdfs"       , Arrays.asList(Role.HDFS_SERVICE_CHECK.toString()));
+    serviceActions.put("glusterfs"  , Arrays.asList(Role.GLUSTERFS_SERVICE_CHECK.toString()));
     serviceActions.put("hbase"      , Arrays.asList(Role.HBASE_SERVICE_CHECK.toString()));
     serviceActions.put("mapreduce"  , Arrays.asList(Role.MAPREDUCE_SERVICE_CHECK.toString()));
     serviceActions.put("mapreduce2" , Arrays.asList(Role.MAPREDUCE2_SERVICE_CHECK.toString()));
@@ -75,9 +74,9 @@ public class ActionMetadata {
     serviceActions.put("oozie"      , Arrays.asList(Role.OOZIE_SERVICE_CHECK.toString()));
     serviceActions.put("pig"        , Arrays.asList(Role.PIG_SERVICE_CHECK.toString()));
     serviceActions.put("sqoop"      , Arrays.asList(Role.SQOOP_SERVICE_CHECK.toString()));
-    serviceActions.put("webhcat"  , Arrays.asList(Role.WEBHCAT_SERVICE_CHECK.toString()));
-    serviceActions.put("storm"  , Arrays.asList(Role.STORM_SERVICE_CHECK.toString()));
-    serviceActions.put("falcon"  , Arrays.asList(Role.FALCON_SERVICE_CHECK.toString()));
+    serviceActions.put("webhcat"    , Arrays.asList(Role.WEBHCAT_SERVICE_CHECK.toString()));
+    serviceActions.put("storm"      , Arrays.asList(Role.STORM_SERVICE_CHECK.toString()));
+    serviceActions.put("falcon"     , Arrays.asList(Role.FALCON_SERVICE_CHECK.toString()));
   }
 
   private void fillServiceCheckActions() {

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
index 615db27..7f04c51 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/orm/entities/HostComponentDesiredStateEntity.java
@@ -17,10 +17,9 @@
  */
 package org.apache.ambari.server.orm.entities;
 
-import java.util.Collection;
-
 import javax.persistence.*;
 
+import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.State;
 
 import static org.apache.commons.lang.StringUtils.defaultString;
@@ -55,6 +54,10 @@ public class HostComponentDesiredStateEntity {
   @Column(name = "desired_stack_version", insertable = true, updatable = true)
   private String desiredStackVersion = "";
 
+  @Enumerated(value = EnumType.STRING)
+  @Column(name = "admin_state", nullable = true, insertable = true, updatable = true)
+  private HostComponentAdminState adminState;
+
   @ManyToOne(cascade = CascadeType.PERSIST)
   @JoinColumns({
       @JoinColumn(name = "cluster_id", referencedColumnName = "cluster_id", nullable = false),
@@ -114,6 +117,15 @@ public class HostComponentDesiredStateEntity {
     this.desiredStackVersion = desiredStackVersion;
   }
 
+
+  public HostComponentAdminState getAdminState() {
+    return adminState;
+  }
+
+  public void setAdminState(HostComponentAdminState attribute) {
+    this.adminState = attribute;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (this == o) return true;

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/HostComponentAdminState.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/HostComponentAdminState.java b/ambari-server/src/main/java/org/apache/ambari/server/state/HostComponentAdminState.java
new file mode 100644
index 0000000..fa3a451
--- /dev/null
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/HostComponentAdminState.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.state;
+
+public enum HostComponentAdminState {
+  /**
+   * The host component is in service (not decommissioned).
+   */
+  INSERVICE,
+  /**
+   * The host component is decommissioned.
+   */
+  DECOMMISSIONED,
+  /**
+   * The host component is being decommissioned.
+   */
+  DECOMMISSIONING
+}
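
Once persisted, this enum value surfaces through the new HostRoles/admin_state property wired up in HostComponentResourceProvider above. A hypothetical read of that property (assumptions: cluster name, host name, and credentials are illustrative):

import base64, json, urllib2

url = ("http://ambari.example.com:8080/api/v1/clusters/c1"
       "/hosts/host1.example.com/host_components/DATANODE"
       "?fields=HostRoles/admin_state")
request = urllib2.Request(url)
request.add_header("Authorization", "Basic " + base64.b64encode("admin:admin"))
response = json.load(urllib2.urlopen(request))
print response["HostRoles"]["admin_state"]  # e.g. DECOMMISSIONED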

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
index a982ef1..65c37f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponent.java
@@ -68,6 +68,8 @@ public interface ServiceComponent {
 
   public boolean isClientComponent();
 
+  public boolean isMasterComponent();
+
   public boolean canBeRemoved();
 
   public void deleteAllServiceComponentHosts() throws AmbariException;

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
index b940503..4ce632a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHost.java
@@ -81,6 +81,10 @@ public interface ServiceComponentHost {
 
   public void setStackVersion(StackId stackVersion);
 
+  public HostComponentAdminState getComponentAdminState();
+
+  public void setComponentAdminState(HostComponentAdminState attribute);
+
   public ServiceComponentHostResponse convertToResponse();
 
   boolean isPersisted();

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
index d535fd9..e0716c8 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentHostFactory.java
@@ -23,8 +23,7 @@ import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
 
 public interface ServiceComponentHostFactory {
 
-  ServiceComponentHost createNew(ServiceComponent serviceComponent,
-                                 String hostName, boolean isClient);
+  ServiceComponentHost createNew(ServiceComponent serviceComponent, String hostName);
 
   ServiceComponentHost createExisting(ServiceComponent serviceComponent,
                                       HostComponentStateEntity stateEntity,

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
index 5deb0a7..9226cf1 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
@@ -18,11 +18,6 @@
 
 package org.apache.ambari.server.state;
 
-import java.util.*;
-import java.util.Map.Entry;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
 import com.google.gson.Gson;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
@@ -33,21 +28,37 @@ import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ServiceComponentHostNotFoundException;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.controller.ServiceComponentResponse;
-import org.apache.ambari.server.orm.dao.*;
-import org.apache.ambari.server.orm.entities.*;
+import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
+import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntityPK;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityPK;
 import org.apache.ambari.server.state.cluster.ClusterImpl;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 public class ServiceComponentImpl implements ServiceComponent {
 
   private final static Logger LOG =
       LoggerFactory.getLogger(ServiceComponentImpl.class);
-  
   private final Service service;
   private final ReadWriteLock clusterGlobalLock;
   private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
-
+  private final boolean isClientComponent;
+  private final boolean isMasterComponent;
+  boolean persisted = false;
   @Inject
   private Gson gson;
   @Inject
@@ -60,22 +71,12 @@ public class ServiceComponentImpl implements ServiceComponent {
   private ServiceComponentHostFactory serviceComponentHostFactory;
   @Inject
   private AmbariMetaInfo ambariMetaInfo;
-
-  boolean persisted = false;
   private ServiceComponentDesiredStateEntity desiredStateEntity;
-
   private Map<String, ServiceComponentHost> hostComponents;
 
-  private final boolean isClientComponent;
-
-  private void init() {
-    // TODO load during restart
-    // initialize from DB
-  }
-
   @AssistedInject
   public ServiceComponentImpl(@Assisted Service service,
-      @Assisted String componentName, Injector injector) throws AmbariException {
+                              @Assisted String componentName, Injector injector) throws AmbariException {
     injector.injectMembers(this);
     this.clusterGlobalLock = service.getClusterGlobalLock();
     this.service = service;
@@ -100,6 +101,7 @@ public class ServiceComponentImpl implements ServiceComponent {
           + ", stackInfo=" + stackId.getStackId());
     }
     this.isClientComponent = compInfo.isClient();
+    this.isMasterComponent = compInfo.isMaster();
 
     init();
   }
@@ -141,10 +143,16 @@ public class ServiceComponentImpl implements ServiceComponent {
           + ", stackInfo=" + stackId.getStackId());
     }
     this.isClientComponent = compInfo.isClient();
+    this.isMasterComponent = compInfo.isMaster();
 
     persisted = true;
   }
 
+  private void init() {
+    // TODO load during restart
+    // initialize from DB
+  }
+
   @Override
   public ReadWriteLock getClusterGlobalLock() {
     return clusterGlobalLock;
@@ -163,8 +171,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -180,8 +186,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -197,13 +201,11 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
   public Map<String, ServiceComponentHost>
-      getServiceComponentHosts() {
+  getServiceComponentHosts() {
     clusterGlobalLock.readLock().lock();
     try {
       readWriteLock.readLock().lock();
@@ -215,8 +217,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -243,8 +243,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.writeLock().unlock();
     }
-
-
   }
 
   @Override
@@ -282,8 +280,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.writeLock().unlock();
     }
-
-
   }
 
   @Override
@@ -311,8 +307,7 @@ public class ServiceComponentImpl implements ServiceComponent {
               + ", serviceComponentName=" + getName()
               + ", hostname=" + hostName);
         }
-        ServiceComponentHost hostComponent =
-            serviceComponentHostFactory.createNew(this, hostName, this.isClientComponent());
+        ServiceComponentHost hostComponent = serviceComponentHostFactory.createNew(this, hostName);
         // FIXME need a better approach of caching components by host
         ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
         clusterImpl.addServiceComponentHost(hostComponent);
@@ -326,13 +321,11 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.writeLock().unlock();
     }
-
-
   }
 
   @Override
   public ServiceComponentHost getServiceComponentHost(String hostname)
-    throws AmbariException {
+      throws AmbariException {
     clusterGlobalLock.readLock().lock();
     try {
       readWriteLock.readLock().lock();
@@ -348,8 +341,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -365,8 +356,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -407,8 +396,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -434,8 +421,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -456,8 +441,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -473,8 +456,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -507,8 +488,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -524,8 +503,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -548,8 +525,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Transactional
@@ -586,8 +561,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Transactional
@@ -605,8 +578,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -615,6 +586,11 @@ public class ServiceComponentImpl implements ServiceComponent {
   }
 
   @Override
+  public boolean isMasterComponent() {
+    return this.isMasterComponent;
+  }
+
+  @Override
   public boolean canBeRemoved() {
     clusterGlobalLock.readLock().lock();
     try {
@@ -642,8 +618,6 @@ public class ServiceComponentImpl implements ServiceComponent {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
index b922293..d9135c0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
@@ -18,7 +18,6 @@
 
 package org.apache.ambari.server.state.svccomphost;
 
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -43,6 +42,7 @@ import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostConfig;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.ServiceComponent;
@@ -589,10 +589,10 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @AssistedInject
   public ServiceComponentHostImpl(@Assisted ServiceComponent serviceComponent,
-                                  @Assisted String hostName, @Assisted boolean isClient, Injector injector) {
+                                  @Assisted String hostName, Injector injector) {
     injector.injectMembers(this);
 
-    if (isClient) {
+    if (serviceComponent.isClientComponent()) {
       this.stateMachine = clientStateMachineFactory.make(this);
     } else {
       this.stateMachine = daemonStateMachineFactory.make(this);
@@ -617,6 +617,11 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     desiredStateEntity.setDesiredState(State.INIT);
     desiredStateEntity.setDesiredStackVersion(
         gson.toJson(serviceComponent.getDesiredStackVersion()));
+    if(!serviceComponent.isMasterComponent() && !serviceComponent.isClientComponent()) {
+      desiredStateEntity.setAdminState(HostComponentAdminState.INSERVICE);
+    } else {
+      desiredStateEntity.setAdminState(null);
+    }
 
     try {
       this.host = clusters.getHost(hostName);
@@ -640,6 +645,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
     this.desiredStateEntity = desiredStateEntity;
     this.stateEntity = stateEntity;
+
     //TODO implement State Machine init as now type choosing is hardcoded in above code
     if (serviceComponent.isClientComponent()) {
       this.stateMachine = clientStateMachineFactory.make(this);
@@ -755,8 +761,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -772,8 +776,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   /**
@@ -791,7 +793,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   /**
@@ -809,7 +810,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   /**
@@ -827,7 +827,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   /**
@@ -845,7 +844,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   /**
@@ -863,7 +861,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   /**
@@ -881,7 +878,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   @Override
@@ -897,8 +893,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -914,8 +908,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
-
   }
 
   @Override
@@ -931,7 +923,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   @Override
@@ -948,10 +939,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
-
   @Override
   public State getDesiredState() {
     clusterGlobalLock.readLock().lock();
@@ -965,7 +954,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   @Override
@@ -997,7 +985,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   @Override
@@ -1014,7 +1001,42 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
+  }
+
+  @Override
+  public HostComponentAdminState getComponentAdminState() {
+    clusterGlobalLock.readLock().lock();
+    try {
+      readLock.lock();
+      try {
+        HostComponentAdminState adminState = desiredStateEntity.getAdminState();
+        if (adminState == null
+            && !serviceComponent.isClientComponent() && !serviceComponent.isMasterComponent()) {
+          adminState = HostComponentAdminState.INSERVICE;
+        }
+        return adminState;
+      } finally {
+        readLock.unlock();
+      }
+    } finally {
+      clusterGlobalLock.readLock().unlock();
+    }
+  }
 
+  @Override
+  public void setComponentAdminState(HostComponentAdminState attribute) {
+    clusterGlobalLock.readLock().lock();
+    try {
+      writeLock.lock();
+      try {
+        desiredStateEntity.setAdminState(attribute);
+        saveIfPersisted();
+      } finally {
+        writeLock.unlock();
+      }
+    } finally {
+      clusterGlobalLock.readLock().unlock();
+    }
   }
 
   @Override
@@ -1031,7 +1053,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
             getState().toString(),
             getStackVersion().getStackId(),
             getDesiredState().toString(),
-            getDesiredStackVersion().getStackId());
+            getDesiredStackVersion().getStackId(),
+            getComponentAdminState());
 
         r.setActualConfigs(actualConfigs);
 
@@ -1040,7 +1063,7 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
         } catch (Exception e) {
           LOG.error("Could not determine stale config", e);
         }
-        
+
         return r;
       } finally {
         readLock.unlock();
@@ -1048,7 +1071,6 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
     } finally {
       clusterGlobalLock.readLock().unlock();
     }
-
   }
 
   @Override

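The getter added above encodes the defaulting rule for admin state: the attribute is only meaningful for slave components, and a missing value on a slave reads as INSERVICE. A small Python restatement of that rule (a sketch; is_client/is_master correspond to the component metadata flags):

    def effective_admin_state(admin_state, is_client, is_master):
        # Mirrors ServiceComponentHostImpl.getComponentAdminState():
        # masters and clients carry no admin state; slaves default to
        # INSERVICE when nothing has been persisted yet.
        if admin_state is None and not is_client and not is_master:
            return "INSERVICE"
        return admin_state
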
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
index 1c0a66c..6f7f465 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
@@ -17,26 +17,6 @@
  */
 package org.apache.ambari.server.utils;
 
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.Set;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
-import javax.xml.bind.JAXBException;
-
 import com.google.common.base.Joiner;
 import com.google.gson.Gson;
 import org.apache.ambari.server.AmbariException;
@@ -44,12 +24,12 @@ import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.Service;
 import org.apache.ambari.server.state.ServiceComponent;
+import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -59,33 +39,44 @@ import org.codehaus.jackson.map.JsonMappingException;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.SerializationConfig;
 
-public class StageUtils {
-  
+import javax.xml.bind.JAXBException;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
 
-  private static final Log LOG = LogFactory.getLog(StageUtils.class);
+public class StageUtils {
 
-  private static Map<String, String> componentToClusterInfoKeyMap =
-      new HashMap<String, String>();
 
-  private volatile static Gson gson;
   public static final Integer DEFAULT_PING_PORT = 8670;
-
+  private static final Log LOG = LogFactory.getLog(StageUtils.class);
   private static final String HOSTS_LIST = "all_hosts";
-
   private static final String PORTS = "all_ping_ports";
-
-  public static void setGson(Gson gson) {
-    if (gson==null) {
-      StageUtils.gson = gson;
-    }
-  }
+  private static Map<String, String> componentToClusterInfoKeyMap =
+      new HashMap<String, String>();
+  private static Map<String, String> decommissionedToClusterInfoKeyMap =
+      new HashMap<String, String>();
+  private volatile static Gson gson;
 
   public static Gson getGson() {
     if (gson != null) {
       return gson;
     } else {
       synchronized (LOG) {
-        if (gson==null) {
+        if (gson == null) {
           gson = new Gson();
         }
         return gson;
@@ -93,6 +84,11 @@ public class StageUtils {
     }
   }
 
+  public static void setGson(Gson gson) {
+    if (gson == null) {
+      StageUtils.gson = gson;
+    }
+  }
 
   static {
     componentToClusterInfoKeyMap.put("NAMENODE", "namenode_host");
@@ -121,6 +117,13 @@ public class StageUtils {
     componentToClusterInfoKeyMap.put("KERBEROS_ADMIN_CLIENT", "kerberos_adminclient_host");
   }
 
+  static {
+    decommissionedToClusterInfoKeyMap.put("DATANODE", "decom_dn_hosts");
+    decommissionedToClusterInfoKeyMap.put("TASKTRACKER", "decom_tt_hosts");
+    decommissionedToClusterInfoKeyMap.put("NODEMANAGER", "decom_nm_hosts");
+    decommissionedToClusterInfoKeyMap.put("HBASE_REGIONSERVER", "decom_hbase_rs_hosts");
+  }
+
   public static String getActionId(long requestId, long stageId) {
     return requestId + "-" + stageId;
   }
@@ -145,7 +148,7 @@ public class StageUtils {
 
   //For testing only
   public static Stage getATestStage(long requestId, long stageId, String hostname, String clusterHostInfo) {
-    
+
     Stage s = new Stage(requestId, "/tmp", "cluster1", "context", clusterHostInfo);
     s.setStageId(stageId);
     long now = System.currentTimeMillis();
@@ -175,7 +178,7 @@ public class StageUtils {
   }
 
   public static String jaxbToString(Object jaxbObj) throws JAXBException,
-  JsonGenerationException, JsonMappingException, IOException {
+      JsonGenerationException, JsonMappingException, IOException {
     return getGson().toJson(jaxbObj);
   }
 
@@ -196,121 +199,141 @@ public class StageUtils {
     return mapper.readValue(is, clazz);
   }
 
-
   public static Map<String, Set<String>> getClusterHostInfo(
-      Map<String, Host> allHosts, Cluster cluster, HostsMap hostsMap,
-      Configuration configuration) throws AmbariException {
+      Map<String, Host> allHosts, Cluster cluster) throws AmbariException {
 
     Map<String, SortedSet<Integer>> hostRolesInfo = new HashMap<String, SortedSet<Integer>>();
-    
+
     Map<String, Set<String>> clusterHostInfo = new HashMap<String, Set<String>>();
 
     //Fill hosts and ports lists
     Set<String> hostsSet = new LinkedHashSet<String>();
     List<Integer> portsList = new ArrayList<Integer>();
-    
+
     for (Host host : allHosts.values()) {
-      
+
       Integer currentPingPort = host.getCurrentPingPort() == null ?
           DEFAULT_PING_PORT : host.getCurrentPingPort();
-      
+
       hostsSet.add(host.getHostName());
       portsList.add(currentPingPort);
     }
-    
+
     List<String> hostsList = new ArrayList<String>(hostsSet);
-    
+
     //Fill host roles
-    for (Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) { 
-      
+    for (Entry<String, Service> serviceEntry : cluster.getServices().entrySet()) {
+
       Service service = serviceEntry.getValue();
-      
+
       for (Entry<String, ServiceComponent> serviceComponentEntry : service.getServiceComponents().entrySet()) {
-        
+
         ServiceComponent serviceComponent = serviceComponentEntry.getValue();
         String componentName = serviceComponent.getName();
-        
+
         String roleName = componentToClusterInfoKeyMap.get(componentName);
-        if (null == roleName && !serviceComponent.isClientComponent())
+        if (null == roleName && !serviceComponent.isClientComponent()) {
           roleName = componentName.toLowerCase() + "_hosts";
-        
-        if (null == roleName)
+        }
+
+        String decomRoleName = decommissionedToClusterInfoKeyMap.get(componentName);
+
+        if (roleName == null && decomRoleName == null) {
           continue;
-        
+        }
+
         for (String hostName : serviceComponent.getServiceComponentHosts().keySet()) {
-          
-          SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(roleName);
-          
-          if (hostsForComponentsHost == null) {
-            hostsForComponentsHost = new TreeSet<Integer>();
-            hostRolesInfo.put(roleName, hostsForComponentsHost);
+
+          if (roleName != null) {
+            SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(roleName);
+
+            if (hostsForComponentsHost == null) {
+              hostsForComponentsHost = new TreeSet<Integer>();
+              hostRolesInfo.put(roleName, hostsForComponentsHost);
+            }
+
+            int hostIndex = hostsList.indexOf(hostName);
+            //Add index of host to current host role
+            hostsForComponentsHost.add(hostIndex);
           }
 
-          int hostIndex = hostsList.indexOf(hostName);
-          //Add index of host to current host role
-          hostsForComponentsHost.add(hostIndex);
+          if (decomRoleName != null) {
+            ServiceComponentHost scHost = serviceComponent.getServiceComponentHost(hostName);
+            if (scHost.getComponentAdminState() == HostComponentAdminState.DECOMMISSIONED) {
+              SortedSet<Integer> hostsForComponentsHost = hostRolesInfo.get(decomRoleName);
+
+              if (hostsForComponentsHost == null) {
+                hostsForComponentsHost = new TreeSet<Integer>();
+                hostRolesInfo.put(decomRoleName, hostsForComponentsHost);
+              }
+
+              int hostIndex = hostsList.indexOf(hostName);
+              //Add index of host to current host role
+              hostsForComponentsHost.add(hostIndex);
+            }
+          }
         }
       }
     }
-    
+
     for (Entry<String, SortedSet<Integer>> entry : hostRolesInfo.entrySet()) {
       TreeSet<Integer> sortedSet = new TreeSet<Integer>(entry.getValue());
-  
+
       Set<String> replacedRangesSet = replaceRanges(sortedSet);
-  
+
       clusterHostInfo.put(entry.getKey(), replacedRangesSet);
     }
 
     clusterHostInfo.put(HOSTS_LIST, hostsSet);
     clusterHostInfo.put(PORTS, replaceMappedRanges(portsList));
-    
+
     return clusterHostInfo;
   }
-  
-  
+
   /**
    * Finds ranges in sorted set and replaces ranges by compact notation
-   * 
+   * <p/>
    * <p>For example, suppose <tt>set</tt> comprises<tt> [1, 2, 3, 4, 7]</tt>.
-   * After invoking <tt>rangedSet = StageUtils.replaceRanges(set)</tt> 
+   * After invoking <tt>rangedSet = StageUtils.replaceRanges(set)</tt>
    * <tt>rangedSet</tt> will comprise
    * <tt>["1-4", "7"]</tt>..
    *
-   * @param  set  the source set to be ranged
+   * @param set the source set to be ranged
    */
   public static Set<String> replaceRanges(SortedSet<Integer> set) {
-    
-    if (set == null)
+
+    if (set == null) {
       return null;
-    
+    }
+
     Set<String> rangedSet = new HashSet<String>();
-    
+
     Integer prevElement = null;
     Integer startOfRange = set.first();
-    
+
     for (Integer i : set) {
-      if (prevElement != null && (i - prevElement) > 1 ) {
+      if (prevElement != null && (i - prevElement) > 1) {
         String rangeItem = getRangedItem(startOfRange, prevElement);
         rangedSet.add(rangeItem);
         startOfRange = i;
       }
       prevElement = i;
     }
-    
+
     rangedSet.add(getRangedItem(startOfRange, prevElement));
-    
+
     return rangedSet;
   }
-  
+
   /**
    * Finds ranges in list and replaces ranges by compact notation
-   * 
+   * <p/>
    * <p>For example, suppose <tt>list</tt> comprises<tt> [1, 1, 2, 2, 1, 3]</tt>.
-   * After invoking <tt>rangedMappedSet = StageUtils.replaceMappedRanges(list)</tt> 
+   * After invoking <tt>rangedMappedSet = StageUtils.replaceMappedRanges(list)</tt>
    * <tt>rangedMappedSet</tt> will comprise
    * <tt>["1:0-1,4", "2:2-3", "3:5"]</tt>..
    *
-   * @param  list  the source list to be ranged
+   * @param values the source list to be ranged
    */
   public static Set<String> replaceMappedRanges(List<Integer> values) {
 
@@ -341,12 +364,12 @@ public class StageUtils {
   }
 
   private static String getRangedItem(Integer startOfRange, Integer endOfRange) {
-    
+
     String separator = (endOfRange - startOfRange) > 1 ? "-" : ",";
-    
-    String rangeItem = endOfRange.equals(startOfRange) ? 
+
+    String rangeItem = endOfRange.equals(startOfRange) ?
         endOfRange.toString() :
-          startOfRange + separator + endOfRange;
+        startOfRange + separator + endOfRange;
     return rangeItem;
   }
 

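getClusterHostInfo now publishes decommissioned slaves under the decom_* keys using the same host-index range compression as the other *_hosts roles. A rough Python port of replaceRanges/getRangedItem, for illustration (note the Java joins a run of exactly two adjacent values with ',' rather than '-'):

    def _ranged_item(start, end):
        # Mirrors getRangedItem(): single value -> "7"; a run of exactly
        # two adjacent values -> "1,2"; a longer run -> "1-4".
        if start == end:
            return str(end)
        sep = "-" if (end - start) > 1 else ","
        return "%d%s%d" % (start, sep, end)

    def replace_ranges(sorted_values):
        # [1, 2, 3, 4, 7] -> set(["1-4", "7"])
        if not sorted_values:
            return set()
        ranged = set()
        start = prev = sorted_values[0]
        for i in sorted_values[1:]:
            if i - prev > 1:
                ranged.add(_ranged_item(start, prev))
                start = i
            prev = i
        ranged.add(_ranged_item(start, prev))
        return ranged

The decom_dn_hosts / decom_tt_hosts entries therefore carry indices into the all_hosts list, not hostnames, exactly like the other *_hosts keys.
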
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
index ba6f1d4..812bba0 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-MySQL-CREATE.sql
@@ -30,7 +30,7 @@ CREATE TABLE clusters (cluster_id BIGINT NOT NULL, cluster_info VARCHAR(255) NOT
 CREATE TABLE clusterconfig (version_tag VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, config_data LONGTEXT NOT NULL, create_timestamp BIGINT NOT NULL, PRIMARY KEY (version_tag, type_name, cluster_id));
 CREATE TABLE clusterservices (service_name VARCHAR(255) NOT NULL, cluster_id BIGINT NOT NULL, service_enabled INTEGER NOT NULL, PRIMARY KEY (service_name, cluster_id));
 CREATE TABLE clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
-CREATE TABLE hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, admin_state VARCHAR(32), PRIMARY KEY (cluster_id, component_name, host_name, service_name));
 CREATE TABLE hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
 CREATE TABLE hosts (host_name VARCHAR(255) NOT NULL, cpu_count INTEGER NOT NULL, cpu_info VARCHAR(255) NOT NULL, discovery_status VARCHAR(2000) NOT NULL, host_attributes LONGTEXT NOT NULL, ipv4 VARCHAR(255), ipv6 VARCHAR(255), last_registration_time BIGINT NOT NULL, os_arch VARCHAR(255) NOT NULL, os_info VARCHAR(1000) NOT NULL, os_type VARCHAR(255) NOT NULL, ph_cpu_count INTEGER, public_host_name VARCHAR(255), rack_info VARCHAR(255) NOT NULL, total_mem BIGINT NOT NULL, PRIMARY KEY (host_name));
 CREATE TABLE hoststate (agent_version VARCHAR(255) NOT NULL, available_mem BIGINT NOT NULL, current_state VARCHAR(255) NOT NULL, health_status VARCHAR(255), host_name VARCHAR(255) NOT NULL, time_in_state BIGINT NOT NULL, PRIMARY KEY (host_name));

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
index 4dcd37f..4be3a42 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Oracle-CREATE.sql
@@ -20,7 +20,7 @@ CREATE TABLE clusters (cluster_id NUMBER(19) NOT NULL, cluster_info VARCHAR2(255
 CREATE TABLE clusterconfig (version_tag VARCHAR2(255) NOT NULL, type_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, config_data CLOB NOT NULL, create_timestamp NUMBER(19) NOT NULL, PRIMARY KEY (version_tag, type_name, cluster_id));
 CREATE TABLE clusterservices (service_name VARCHAR2(255) NOT NULL, cluster_id NUMBER(19) NOT NULL, service_enabled NUMBER(10) NOT NULL, PRIMARY KEY (service_name, cluster_id));
 CREATE TABLE clusterstate (cluster_id NUMBER(19) NOT NULL, current_cluster_state VARCHAR2(255) NULL, current_stack_version VARCHAR2(255) NULL, PRIMARY KEY (cluster_id));
-CREATE TABLE hostcomponentdesiredstate (cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, desired_stack_version VARCHAR2(255) NULL, desired_state VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE hostcomponentdesiredstate (cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, desired_stack_version VARCHAR2(255) NULL, desired_state VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, admin_state VARCHAR2(32) NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
 CREATE TABLE hostcomponentstate (cluster_id NUMBER(19) NOT NULL, component_name VARCHAR2(255) NOT NULL, current_stack_version VARCHAR2(255) NOT NULL, current_state VARCHAR2(255) NOT NULL, host_name VARCHAR2(255) NOT NULL, service_name VARCHAR2(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
 CREATE TABLE hosts (host_name VARCHAR2(255) NOT NULL, cpu_count INTEGER NOT NULL, cpu_info VARCHAR2(255) NULL, discovery_status VARCHAR2(2000) NULL, host_attributes CLOB NULL, ipv4 VARCHAR2(255) NULL, ipv6 VARCHAR2(255) NULL, last_registration_time INTEGER NOT NULL, os_arch VARCHAR2(255) NULL, os_info VARCHAR2(1000) NULL, os_type VARCHAR2(255) NULL, ph_cpu_count INTEGER NOT NULL, public_host_name VARCHAR2(255) NULL, rack_info VARCHAR2(255) NOT NULL, total_mem INTEGER NOT NULL, PRIMARY KEY (host_name));
 CREATE TABLE hoststate (agent_version VARCHAR2(255) NULL, available_mem NUMBER(19) NOT NULL, current_state VARCHAR2(255) NOT NULL, health_status VARCHAR2(255) NULL, host_name VARCHAR2(255) NOT NULL, time_in_state NUMBER(19) NOT NULL, PRIMARY KEY (host_name));

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
index 2bdb174..ff2b480 100644
--- a/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
+++ b/ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
@@ -43,7 +43,7 @@ GRANT ALL PRIVILEGES ON TABLE ambari.clusterservices TO :username;
 CREATE TABLE ambari.clusterstate (cluster_id BIGINT NOT NULL, current_cluster_state VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id));
 GRANT ALL PRIVILEGES ON TABLE ambari.clusterstate TO :username;
 
-CREATE TABLE ambari.hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));
+CREATE TABLE ambari.hostcomponentdesiredstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, desired_stack_version VARCHAR(255) NOT NULL, desired_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, admin_state VARCHAR(32), PRIMARY KEY (cluster_id, component_name, host_name, service_name));
 GRANT ALL PRIVILEGES ON TABLE ambari.hostcomponentdesiredstate TO :username;
 
 CREATE TABLE ambari.hostcomponentstate (cluster_id BIGINT NOT NULL, component_name VARCHAR(255) NOT NULL, current_stack_version VARCHAR(255) NOT NULL, current_state VARCHAR(255) NOT NULL, host_name VARCHAR(255) NOT NULL, service_name VARCHAR(255) NOT NULL, PRIMARY KEY (cluster_id, component_name, host_name, service_name));

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/properties.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/properties.json b/ambari-server/src/main/resources/properties.json
index 52c0a20..a80e7ba 100644
--- a/ambari-server/src/main/resources/properties.json
+++ b/ambari-server/src/main/resources/properties.json
@@ -64,6 +64,7 @@
         "params/run_smoke_test",
         "HostRoles/nagios_alerts",
         "HostRoles/stale_configs",
+        "HostRoles/admin_state",
         "_"
     ],
     "Configuration":[

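With HostRoles/admin_state exposed as a property, clients can read back a slave's decommission status over the REST API. A hedged example using python-requests (server location, credentials, and the exact response shape here are assumptions, not part of this patch):

    import requests

    # Assumed Ambari server and default credentials -- adjust as needed.
    base = "http://ambari.example.com:8080/api/v1"
    url = (base + "/clusters/c1/hosts/host1.example.com"
                  "/host_components/DATANODE")
    resp = requests.get(url, params={"fields": "HostRoles/admin_state"},
                        auth=("admin", "admin"))
    print(resp.json()["HostRoles"]["admin_state"])  # e.g. "DECOMMISSIONED"
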
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
index 4a15586..aabb406 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
@@ -153,16 +153,6 @@ mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
 
 yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
 
-#exclude file
-exlude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-if 'hdfs-exclude-file' in config['configurations']:
-  if 'datanodes' in config['configurations']['hdfs-exclude-file']:
-    hdfs_exclude_file = config['configurations']['hdfs-exclude-file']['datanodes'].split(",")
-  else:
-    hdfs_exclude_file = []
-else:
-  hdfs_exclude_file = []
-
 #hdfs ha properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
index 6243eca..7b406e1 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
@@ -265,7 +265,6 @@ def setup_configs():
          group=params.user_group
     )
 
-  # generate_exlude_file()
   # generate_include_file()
 
 def update_log4j_props(file):
@@ -293,16 +292,6 @@ def update_log4j_props(file):
       "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}"))
 
 
-def generate_exlude_file():
-  import params
-
-  File(params.exlude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=params.hdfs_user,
-       group=params.user_group
-  )
-
-
 def generate_include_file():
   import params
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
index aedb8e0..129d0ae 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
@@ -33,6 +33,16 @@
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
index cd6f3a5..5f587d3 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
@@ -32,6 +32,16 @@
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>

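The DECOMMISSION custom command registered above would be driven through the regular custom-command request API. A sketch of what a client call might look like; the request-body field names (excluded_hosts, resource_filters) are assumptions based on how custom commands are typically invoked, and are not confirmed by this diff:

    import json
    import requests

    # Hypothetical request body -- parameter names are illustrative only.
    body = {
        "RequestInfo": {
            "command": "DECOMMISSION",
            "context": "Decommission DataNode",
            "parameters": {"excluded_hosts": "host1.example.com"},
        },
        "Requests/resource_filters": [
            {"service_name": "HDFS", "component_name": "NAMENODE"}
        ],
    }
    requests.post(
        "http://ambari.example.com:8080/api/v1/clusters/c1/requests",
        data=json.dumps(body), auth=("admin", "admin"),
        headers={"X-Requested-By": "ambari"})

The command is addressed to the NAMENODE (the master) because, per the metainfo above, the DECOMMISSION script runs on the master to refresh its exclude list, not on the slave being drained.
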
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
index 9830b63..d8e191f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
@@ -52,6 +52,8 @@ def namenode(action=None, format=True):
       principal=params.dfs_namenode_kerberos_principal
     )
 
+  if action == "decommission":
+    decommission()
 
 def create_name_dirs(directories):
   import params
@@ -170,3 +172,21 @@ def format_namenode(force=None):
               not_if=format("test -d {mark_dir}"),
               path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
     Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=params.user_group
+  )
+
+  ExecuteHadoop('dfsadmin -refreshNodes',
+                user=hdfs_user,
+                conf_dir=conf_dir,
+                kinit_override=True)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
index 9b0fe43..80700c8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
@@ -53,7 +53,14 @@ class NameNode(Script):
 
     env.set_params(status_params)
     check_process_status(status_params.namenode_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
 
+    env.set_params(params)
+    namenode(action="decommission")
+    pass
 
 if __name__ == "__main__":
   NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
index e93c50a..3e0e65b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
@@ -41,6 +41,10 @@ dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site'
 dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
 dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
 
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
 kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts
 hostname = config["hostname"]

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

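This template is rendered with hdfs_exclude_file, which params.py now takes straight from the decom_dn_hosts cluster-host-info key. Standalone, the rendering amounts to the following (a sketch using plain jinja2 rather than resource_management's Template wrapper; trim_blocks is assumed to match the wrapper's behavior):

    from jinja2 import Template

    template = Template(
        "{% for host in hdfs_exclude_file %}\n"
        "{{host}}\n"
        "{% endfor %}",
        trim_blocks=True)
    print(template.render(hdfs_exclude_file=["host1.example.com",
                                             "host2.example.com"]))
    # One excluded hostname per line -- the dfs.hosts.exclude file format.
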
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
index 1e44d2c..874209b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
@@ -33,6 +33,16 @@
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/jobtracker.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
index a48cc23..8f7f1d7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
@@ -77,6 +77,28 @@ class Jobtracker(Script):
     import status_params
     env.set_params(status_params)
     check_process_status(status_params.jobtracker_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+
+    mapred_user = params.mapred_user
+    conf_dir = params.conf_dir
+    user_group = params.user_group
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=mapred_user,
+         group=user_group
+    )
+
+    ExecuteHadoop('mradmin -refreshNodes',
+                user=mapred_user,
+                conf_dir=conf_dir,
+                kinit_override=True)
+    pass
 
 if __name__ == "__main__":
   Jobtracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
index dd8569a..55f4303 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
@@ -46,4 +46,8 @@ hadoop_jar_location = "/usr/lib/hadoop/"
 smokeuser = config['configurations']['global']['smokeuser']
 security_enabled = config['configurations']['global']['security_enabled']
 smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
\ No newline at end of file
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#exclude file
+mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
+exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..02fc5fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in mr_exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
index 0effa01..712a5ab 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
@@ -153,16 +153,6 @@ mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
 
 yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
 
-#exclude file
-exlude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-if 'hdfs-exclude-file' in config['configurations']:
-  if 'datanodes' in config['configurations']['hdfs-exclude-file']:
-    hdfs_exclude_file = config['configurations']['hdfs-exclude-file']['datanodes'].split(",")
-  else:
-    hdfs_exclude_file = []
-else:
-  hdfs_exclude_file = []
-
 #hdfs ha properties
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
index 9a1661a..f2644aa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
@@ -270,7 +270,6 @@ def setup_configs():
          group=params.user_group
     )
 
-  generate_exlude_file()
   generate_include_file()
 
 def update_log4j_props(file):
@@ -298,16 +297,6 @@ def update_log4j_props(file):
       "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}"))
 
 
-def generate_exlude_file():
-  import params
-
-  File(params.exlude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=params.hdfs_user,
-       group=params.user_group
-  )
-
-
 def generate_include_file():
   import params
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
index f6db6be..7227b6e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
@@ -33,20 +33,11 @@
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
           <customCommands>
             <customCommand>
               <name>DECOMMISSION</name>
               <commandScript>
-                <script>scripts/hbase_regionserver.py</script>
+                <script>scripts/hbase_master.py</script>
                 <scriptType>PYTHON</scriptType>
                 <timeout>600</timeout>
               </commandScript>
@@ -55,6 +46,15 @@
         </component>
 
         <component>
+          <name>HBASE_REGIONSERVER</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
           <name>HBASE_CLIENT</name>
           <category>CLIENT</category>
           <commandScript>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
index 6cb2dfa..3de6ce5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
@@ -32,6 +32,16 @@
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
index ecda794..e26f758 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
@@ -34,6 +34,13 @@ def namenode(action=None, format=True):
     if format:
       format_namenode()
       pass
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
     service(
       action="start", name="namenode", user=params.hdfs_user,
       keytab=params.dfs_namenode_keytab_file,
@@ -53,6 +60,8 @@ def namenode(action=None, format=True):
       principal=params.dfs_namenode_kerberos_principal
     )
 
+  if action == "decommission":
+    decommission()
 
 def create_name_dirs(directories):
   import params
@@ -178,3 +187,21 @@ def format_namenode(force=None):
               not_if=format("test -d {mark_dir}"),
               path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
     Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=params.hdfs_user,
+       group=params.user_group
+  )
+
+  ExecuteHadoop('dfsadmin -refreshNodes',
+                user=hdfs_user,
+                conf_dir=conf_dir,
+                kinit_override=True)

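For orientation, the new decommission() helper amounts to two host-level steps: rewrite the exclude file from the decom_dn_hosts list, then ask the NameNode to re-read it. A minimal standalone sketch of the same flow (the user name and paths here are assumptions for illustration, not taken from params):

import subprocess

def refresh_excluded_datanodes(hosts, exclude_path="/etc/hadoop/conf/dfs.exclude"):
    # One excluded host per line, matching what exclude_hosts_list.j2 renders.
    with open(exclude_path, "w") as out:
        out.write("\n".join(hosts))
    # Have the NameNode re-read dfs.hosts.exclude; run as the HDFS superuser.
    subprocess.check_call(["su", "-", "hdfs", "-c",
                           "hadoop --config /etc/hadoop/conf dfsadmin -refreshNodes"])
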
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
index 9a1dba36..deb01d5 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
@@ -55,7 +55,14 @@ class NameNode(Script):
 
     env.set_params(status_params)
     check_process_status(status_params.namenode_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
 
+    env.set_params(params)
+    namenode(action="decommission")
+    pass
 
 if __name__ == "__main__":
   NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
index 568a8a8..a2479c0 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
@@ -41,6 +41,10 @@ dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site'
 dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
 dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
 
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
 kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts
 hostname = config["hostname"]

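For illustration only (hypothetical hosts): with two DataNodes queued for decommission, these two params would resolve to something like

  hdfs_exclude_file = ['host1.example.com', 'host2.example.com']  # from /clusterHostInfo/decom_dn_hosts
  exclude_file_path = '/etc/hadoop/conf/dfs.exclude'              # from dfs.hosts.exclude in hdfs-site

so the template below gets the host list and the File resource gets its destination path.
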
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file
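
Rendered against the hypothetical host list above, the template produces one bare hostname per line:

host1.example.com
host2.example.com

which is exactly the format the NameNode expects for the dfs.hosts.exclude file.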

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
index 127d055..f25d80b 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
@@ -33,6 +33,16 @@
             <scriptType>PYTHON</scriptType>
             <timeout>600</timeout>
           </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>

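The YARN side mirrors the HDFS change: DECOMMISSION is attached to the RESOURCEMANAGER component, since the ResourceManager owns the NodeManager exclude list. The host-level effect is presumably the YARN analogue of the HDFS refresh, i.e. something like

  yarn rmadmin -refreshNodes

run against the ResourceManager's configuration (command shown for illustration; the actual handler is scripts/resourcemanager.py above).
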
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
index b1ad27d..2253658 100644
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
+++ b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql
@@ -17,4 +17,9 @@
 --
 
 
--- DDL
\ No newline at end of file
+-- DDL
+
+-- Upgrade version to current
+UPDATE metainfo SET metainfo_value = '${ambariVersion}' WHERE metainfo_key = 'version';
+
+ALTER TABLE hostcomponentdesiredstate ADD admin_state VARCHAR(32);

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
index bcd958d..916a1e8 100644
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
+++ b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql
@@ -25,6 +25,9 @@ ALTER TABLE hostconfigmapping ADD (user_name VARCHAR2 (255) DEFAULT '_db');
 
 ALTER TABLE stage ADD (cluster_host_info BLOB DEFAULT NULL);
 
+-- add decommission state
+ALTER TABLE hostcomponentdesiredstate ADD (admin_state VARCHAR2 (32) DEFAULT NULL);
+
 -- DML
 --Upgrade version to current
 UPDATE metainfo SET "metainfo_key" = 'version', "metainfo_value" = '${ambariVersion}';

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
index a914a3f..bb02aba 100644
--- a/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
+++ b/ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
@@ -137,6 +137,9 @@ CREATE TABLE ambari.action (action_name VARCHAR(255) NOT NULL, action_type VARCH
 target_service VARCHAR(255), target_component VARCHAR(255), default_timeout SMALLINT NOT NULL, description VARCHAR(1000), target_type VARCHAR(32), PRIMARY KEY (action_name));
 GRANT ALL PRIVILEGES ON TABLE ambari.action TO :username;
 
+-- add decommission state
+ALTER TABLE ambari.hostcomponentdesiredstate ADD COLUMN admin_state VARCHAR(32);
+
 --Move cluster host info for old execution commands to stage table
 UPDATE ambari.stage sd
   SET 

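As a sanity check after an upgrade, the new column can be queried directly; a hypothetical example against the Postgres schema created above:

  SELECT host_name, component_name, admin_state
    FROM ambari.hostcomponentdesiredstate
   WHERE component_name = 'DATANODE';

(host_name and component_name are assumed to be existing columns of that table; the admin_state values themselves are written by the server.)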

[4/7] AMBARI-4270. Add decommission support for TaskTracker and modify support for DataNode to match

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..49d66bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
@@ -0,0 +1,192 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_namenode_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>dfs_namenode_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1073741824</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>dfs_namenode_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>dfs_exclude</name>
+    <value></value>
+    <description>HDFS Exclude hosts.</description>
+  </property>
+  <property>
+    <name>dfs_replication</name>
+    <value>3</value>
+    <description>Default Block Replication.</description>
+  </property>
+  <property>
+    <name>dfs_block_local_path_access_user</name>
+    <value>hbase</value>
+    <description>User allowed for block local path access.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_address</name>
+    <value>50010</value>
+    <description>Port for datanode address.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_http_address</name>
+    <value>50075</value>
+    <description>Port for datanode http address.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_data_dir_perm</name>
+    <value>750</value>
+    <description>Datanode dir perms.</description>
+  </property>
+
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>kadmin_pw</name>
+    <value></value>
+    <description>Kerberos realm admin password</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>Kerberos keytab path.</description>
+  </property>
+
+  <property>
+    <name>namenode_formatted_mark_dir</name>
+    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
+    <description>Formatted Mark Directory.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>HDFS user.</description>
+  </property>
+  <property>
+    <name>lzo_enabled</name>
+    <value>true</value>
+    <description>LZO compression enabled</description>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 0000000..51b01bb
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+<property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..7e8bfba
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,513 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <!-- file system properties -->
+
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <!-- cluster variant -->
+    <value>/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+    <description>Whether to enable WebHDFS feature</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+      should store its blocks.  If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
+  </property>
+
+  <!--
+    <property>
+      <name>dfs.hosts</name>
+      <value>/etc/hadoop/conf/dfs.include</value>
+      <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
+    </property>
+  -->
+
+  <property>
+    <name>dfs.namenode.checkpoint.dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary images to merge.
+      If this is a comma-delimited list of directories then the image is
+      replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.checkpoint.edits.dir</name>
+    <value>${dfs.namenode.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary edits to merge.
+      If this is a comma-delimited list of directories then the edits are
+      replicated in all of the directories for redundancy.
+      Default value is same as dfs.namenode.checkpoint.dir
+    </description>
+  </property>
+
+
+  <property>
+    <name>dfs.namenode.checkpoint.period</name>
+    <value>21600</value>
+    <description>The number of seconds between two periodic checkpoints.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>67108864</value>
+    <description>The size of the current edit log (in bytes) that triggers
+      a periodic checkpoint even if the maximum checkpoint delay is not reached
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.safemode.threshold-pct</name>
+    <value>1.0f</value>
+    <description>
+      Specifies the percentage of blocks that should satisfy
+      the minimal replication requirement defined by dfs.namenode.replication.min.
+      Values less than or equal to 0 mean not to start in safe mode.
+      Values greater than 1 will make safe mode permanent.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+      Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in term of
+      the number of bytes per second.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>
+      This property is used by HftpFileSystem.
+    </description>
+  </property>
+
+  <property>
+    <name>ambari.dfs.datanode.port</name>
+    <value>50010</value>
+    <description>
+      The datanode port for data transfer. This property is effective only if referenced from dfs.datanode.address property.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
+    <description>
+      The datanode server address and port for data transfer.
+    </description>
+  </property>
+
+  <property>
+    <name>ambari.dfs.datanode.http.port</name>
+    <value>50075</value>
+    <description>
+      The datanode http port. This property is effective only if referenced from dfs.datanode.http.address property.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
+    <description>
+      The datanode http server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blocksize</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>localhost:50070</value>
+    <description>The address and the base port where the dfs namenode
+      web ui will listen on.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>The number of server threads for the namenode. Increased from the default to allow more client connections.</description>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the NameNode
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+  <!--
+    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+  -->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value></value>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value></value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.namenode.secondary.http-address</name>
+    <value>localhost:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value></value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value></value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value></value>
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value></value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value></value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value></value>
+    <description>
+      The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>localhost:50470</value>
+    <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be there on dfs.datanode.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.datanode.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.accesstime.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.avoid.read.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid reading from stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.avoid.write.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid writing to stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.write.stale.datanode.ratio</name>
+    <value>1.0f</value>
+    <description>When the ratio of stale datanodes to total datanodes is greater
+      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.stale.datanode.interval</name>
+    <value>30000</value>
+    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.http-address</name>
+    <value>0.0.0.0:8480</value>
+    <description>The address and port the JournalNode web UI listens on.
+      If the port is 0 then the server will start on a free port. </description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>/grid/0/hdfs/journal</value>
+    <description>The path where the JournalNode daemon will store its local state. </description>
+  </property>
+
+  <!-- HDFS Short-Circuit Local Reads -->
+
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      This configuration parameter turns on short-circuit local reads.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit.skip.checksum</name>
+    <value></value>
+    <description>Enable/disable skipping the checksum check</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+    <value>4096</value>
+    <description>
+      The DFSClient maintains a cache of recently opened file descriptors. This
+      parameter controls the size of that cache. Setting this higher will use
+      more file descriptors, but potentially provide better performance on
+      workloads involving lots of seeks.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.name.dir.restore</name>
+    <value>true</value>
+    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
+      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..3de6ce5
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metainfo.xml
@@ -0,0 +1,152 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.1.0.2.0.6.0</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>JOURNALNODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>


[2/7] AMBARI-4270. Add decommission support for TaskTracker and modify support for DataNode to match

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

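Invocation sketch for the helper above, with hypothetical values; the argument order is HDFS user, conf dir, marker dir, then the comma-separated name dir list:

  sh checkForFormat.sh hdfs /etc/hadoop/conf /var/run/hadoop/hdfs/namenode/formatted /hadoop/hdfs/namenode

The script exits with the count of non-empty name dirs, i.e. non-zero when formatting was refused.
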
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkWebUI.py
new file mode 100644
index 0000000..f8e9c1a
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/files/checkWebUI.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
+
+  (options, args) = parser.parse_args()
+  
+  hosts = options.hosts.split(',')
+  port = options.port
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPConnection(host, port)
+      # This can be modified to get a partial url part to be sent with request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port
+      exit(1)
+      
+
+if __name__ == "__main__":
+  main()

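Usage sketch, hypothetical hosts (50070 is the usual NameNode web UI port):

  python checkWebUI.py -m host1.example.com,host2.example.com -p 50070

The script exits 1 at the first host whose UI does not answer with HTTP 200.
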
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/datanode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/datanode.py
new file mode 100644
index 0000000..eaa27cf
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_datanode import datanode
+
+
+class DataNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    datanode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    datanode(action="stop")
+
+  def config(self, env):
+    import params
+
+    datanode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.datanode_pid_file)
+
+
+if __name__ == "__main__":
+  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..6babde5
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,49 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class HdfsClient(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+  def config(self, env):
+    import params
+
+    pass
+
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_datanode.py
new file mode 100644
index 0000000..e0b6c39
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_datanode.py
@@ -0,0 +1,56 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+def datanode(action=None):
+  import params
+
+  if action == "configure":
+    Directory(params.dfs_domain_socket_dir,
+              recursive=True,
+              mode=0750,
+              owner=params.hdfs_user,
+              group=params.user_group)
+    Directory(params.dfs_data_dir,
+              recursive=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+
+  if action == "start":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )
+  if action == "stop":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_namenode.py
new file mode 100644
index 0000000..ecda794
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_namenode.py
@@ -0,0 +1,180 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def namenode(action=None, format=True):
+  import params
+  # this directory must be present before any action (HA manual steps for
+  # the additional namenode)
+  if action == "configure":
+    create_name_dirs(params.dfs_name_dir)
+
+  if action == "start":
+    if format:
+      format_namenode()
+    service(
+      action="start", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      create_pid_dir=True,
+      create_log_dir=True,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+    # TODO: extract creating of dirs to different services
+    create_app_directories()
+    create_user_directories()
+
+  if action == "stop":
+    service(
+      action="stop", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+
+def create_app_directories():
+  import params
+
+  hdfs_directory(name="/tmp",
+                 owner=params.hdfs_user,
+                 mode="777"
+  )
+  #mapred directories
+  if params.has_historyserver:
+    hdfs_directory(name="/mapred",
+                   owner=params.mapred_user
+    )
+    hdfs_directory(name="/mapred/system",
+                   owner=params.hdfs_user
+    )
+  #hbase directories
+  if len(params.hbase_master_hosts) != 0:
+    hdfs_directory(name=params.hbase_hdfs_root_dir,
+                   owner=params.hbase_user
+    )
+    hdfs_directory(name=params.hbase_staging_dir,
+                   owner=params.hbase_user,
+                   mode="711"
+    )
+  #hive directories
+  if len(params.hive_server_host) != 0:
+    hdfs_directory(name=params.hive_apps_whs_dir,
+                   owner=params.hive_user,
+                   mode="777"
+    )
+  if len(params.hcat_server_hosts) != 0:
+    hdfs_directory(name=params.webhcat_apps_dir,
+                   owner=params.webhcat_user,
+                   mode="755"
+    )
+  if len(params.hs_host) != 0:
+    if params.yarn_log_aggregation_enabled:
+      hdfs_directory(name=params.yarn_nm_app_log_dir,
+                     owner=params.yarn_user,
+                     group=params.user_group,
+                     mode="777",
+                     recursive_chmod=True
+      )
+    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="777"
+    )
+
+    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="1777"
+    )
+
+
+
+def create_user_directories():
+  import params
+
+  hdfs_directory(name=params.smoke_hdfs_user_dir,
+                 owner=params.smoke_user,
+                 mode=params.smoke_hdfs_user_mode
+  )
+
+  if params.has_hive_server_host:
+    hdfs_directory(name=params.hive_hdfs_user_dir,
+                   owner=params.hive_user,
+                   mode=params.hive_hdfs_user_mode
+    )
+
+  if params.has_hcat_server_host:
+    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+      hdfs_directory(name=params.hcat_hdfs_user_dir,
+                     owner=params.hcat_user,
+                     mode=params.hcat_hdfs_user_mode
+      )
+    hdfs_directory(name=params.webhcat_hdfs_user_dir,
+                   owner=params.webhcat_user,
+                   mode=params.webhcat_hdfs_user_mode
+    )
+
+  if params.has_oozie_server:
+    hdfs_directory(name=params.oozie_hdfs_user_dir,
+                   owner=params.oozie_user,
+                   mode=params.oozie_hdfs_user_mode
+    )
+
+
+def format_namenode(force=None):
+  import params
+
+  mark_dir = params.namenode_formatted_mark_dir
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if not params.dfs_ha_enabled:
+    if force:
+      ExecuteHadoop('namenode -format',
+                    kinit_override=True)
+    else:
+      File('/tmp/checkForFormat.sh',
+           content=StaticFile("checkForFormat.sh"),
+           mode=0755)
+      Execute(format(
+        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
+        "{dfs_name_dir}"),
+              not_if=format("test -d {mark_dir}"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
+    Execute(format("mkdir -p {mark_dir}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_snamenode.py
new file mode 100644
index 0000000..a943455
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def snamenode(action=None, format=False):
+  import params
+
+  if action == "configure":
+    Directory(params.fs_checkpoint_dir,
+              recursive=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+  elif action == "start":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )
+  elif action == "stop":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/journalnode.py
new file mode 100644
index 0000000..fd355cc
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/journalnode.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class JournalNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    service(
+      action="start", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_journalnode_keytab_file,
+      principal=params.dfs_journalnode_kerberos_principal
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_journalnode_keytab_file,
+      principal=params.dfs_journalnode_kerberos_principal
+    )
+
+  def config(self, env):
+    import params
+
+    Directory(params.jn_edits_dir,
+              recursive=True,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.journalnode_pid_file)
+
+
+if __name__ == "__main__":
+  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/namenode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/namenode.py
new file mode 100644
index 0000000..9a1dba36
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,61 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_namenode import namenode
+
+
+class NameNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    #TODO remove when config action will be implemented
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    namenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="configure")
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.namenode_pid_file)
+
+
+if __name__ == "__main__":
+  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/params.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/params.py
new file mode 100644
index 0000000..568a8a8
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/params.py
@@ -0,0 +1,180 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+import os
+
+config = Script.get_config()
+
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+
+has_ganglia_server = len(ganglia_server_hosts) > 0
+has_namenodes = len(namenode_host) > 0
+has_jobtracker = len(jtnode_host) > 0
+has_resourcemanager = len(rm_host) > 0
+has_historyserver = len(hs_host) > 0
+has_hbase_masters = len(hbase_master_hosts) > 0
+has_slaves = len(slave_hosts) > 0
+has_nagios = len(nagios_server_hosts) > 0
+has_oozie_server = len(oozie_servers) > 0
+has_hcat_server_host = len(hcat_server_hosts) > 0
+has_hive_server_host = len(hive_server_host) > 0
+has_journalnode_hosts = len(journalnode_hosts) > 0
+has_zkfc_hosts = len(zkfc_hosts) > 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = status_params.hdfs_user
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group = "users"
+
+#hadoop params
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+hadoop_bin = "/usr/lib/hadoop/sbin"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+
+dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
+
+# if stack_version[0] == "2":
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+# else:
+#   dfs_name_dir = default("/configurations/hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
+
+namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
+hbase_staging_dir = "/apps/hbase/staging"
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
+webhcat_apps_dir = "/apps/webhcat"
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']#","true")
+yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']#","/app-logs")
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
+
+if has_oozie_server:
+  oozie_hdfs_user_dir = format("/user/{oozie_user}")
+  oozie_hdfs_user_mode = 775
+if has_hcat_server_host:
+  hcat_hdfs_user_dir = format("/user/{hcat_user}")
+  hcat_hdfs_user_mode = 755
+  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+  webhcat_hdfs_user_mode = 755
+if has_hive_server_host:
+  hive_hdfs_user_dir = format("/user/{hive_user}")
+  hive_hdfs_user_mode = 700
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 770
+
+namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
+
+# if stack_version[0] == "2":
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
+# else:
+#   fs_checkpoint_dir = default("/configurations/core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
+
+# if stack_version[0] == "2":
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
+# else:
+#   dfs_data_dir = default('/configurations/hdfs-site/dfs.data.dir',"/tmp/hadoop-hdfs/dfs/data")
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+  if len(dfs_ha_namenode_ids_list) > 1:
+    dfs_ha_enabled = True
+namenode_id = None
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
+if journalnode_address:
+  journalnode_port = journalnode_address.split(":")[1]
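
The HA block above resolves which NameNode id belongs to the current host. A standalone illustration of the same logic, with hypothetical hdfs-site values (not taken from this patch):

    hdfs_site = {
      'dfs.ha.namenodes.mycluster': 'nn1,nn2',
      'dfs.namenode.rpc-address.mycluster.nn1': 'host1.example.com:8020',
      'dfs.namenode.rpc-address.mycluster.nn2': 'host2.example.com:8020',
    }
    hostname = 'host2.example.com'
    namenode_id = None
    for nn_id in hdfs_site['dfs.ha.namenodes.mycluster'].split(','):
      if hostname in hdfs_site['dfs.namenode.rpc-address.mycluster.' + nn_id]:
        namenode_id = nn_id
    print(namenode_id)  # -> 'nn2'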
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/service_check.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/service_check.py
new file mode 100644
index 0000000..d27b13a
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,107 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class HdfsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = get_unique_id_and_date()
+    dir = '/tmp'
+    tmp_file = format("{dir}/{unique}")
+
+    safemode_command = "dfsadmin -safemode get | grep OFF"
+
+    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
+    test_dir_exists = format("hadoop fs -test -e {dir}")
+    cleanup_cmd = format("fs -rm {tmp_file}")
+    #cleanup put below to handle retries; if retrying there will be a stale file
+    #that needs cleanup; exit code is fn of second command
+    create_file_cmd = format(
+      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+    test_cmd = format("fs -test -e {tmp_file}")
+    if params.security_enabled:
+      Execute(format(
+        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
+        "{smoke_user}'"))
+    ExecuteHadoop(safemode_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_dir_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  not_if=test_dir_exists,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_file_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(test_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    if params.has_journalnode_hosts:
+      journalnode_port = params.journalnode_port
+      smoke_test_user = params.smoke_user
+      checkWebUIFileName = "checkWebUI.py"
+      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
+      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+      checkWebUICmd = format(
+        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
+        "{comma_sep_jn_hosts} -p {journalnode_port}'")
+      File(checkWebUIFilePath,
+           content=StaticFile(checkWebUIFileName))
+
+      Execute(checkWebUICmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5
+      )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = format(
+          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()
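
Stripped of the retry and kinit plumbing, the check reduces to this command sequence (conf-dir flags omitted; <unique> is the generated id):

    # hadoop dfsadmin -safemode get | grep OFF
    # hadoop fs -mkdir /tmp ; hadoop fs -chmod -R 777 /tmp   (skipped if /tmp exists)
    # hadoop fs -rm /tmp/<unique> ; hadoop fs -put /etc/passwd /tmp/<unique>
    # hadoop fs -test -e /tmp/<unique>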

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000..8f682ec
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_snamenode import snamenode
+
+
+class SNameNode(Script):
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.config(env)
+    snamenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.snamenode_pid_file)
+
+
+if __name__ == "__main__":
+  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/status_params.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/status_params.py
new file mode 100644
index 0000000..4097373
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/status_params.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['global']['hdfs_user']
+hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
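
With the common defaults hadoop_pid_dir_prefix=/var/run/hadoop and hdfs_user=hdfs (illustrative values, not taken from this patch), these resolve to:

    # hdp_pid_dir          -> /var/run/hadoop/hdfs
    # datanode_pid_file    -> /var/run/hadoop/hdfs/hadoop-hdfs-datanode.pid
    # namenode_pid_file    -> /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid
    # snamenode_pid_file   -> /var/run/hadoop/hdfs/hadoop-hdfs-secondarynamenode.pid
    # journalnode_pid_file -> /var/run/hadoop/hdfs/hadoop-hdfs-journalnode.pid
    # zkfc_pid_file        -> /var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid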

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/utils.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/utils.py
new file mode 100644
index 0000000..225cd2e
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/utils.py
@@ -0,0 +1,138 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def service(action=None, name=None, user=None, create_pid_dir=False,
+            create_log_dir=False, keytab=None, principal=None):
+  import params
+
+  kinit_cmd = "true"
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+  hadoop_daemon = format(
+    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
+
+  if create_pid_dir:
+    Directory(pid_dir,
+              owner=user,
+              recursive=True)
+  if create_log_dir:
+    Directory(log_dir,
+              owner=user,
+              recursive=True)
+
+  if params.security_enabled and principal is not None:
+    principal_replaced = principal.replace("_HOST", params.hostname)
+    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
+
+    if name == "datanode":
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+  daemon_cmd = format("{cmd} {action} {name}")
+
+  service_is_up = format(
+    "ls {pid_file} >/dev/null 2>&1 &&"
+    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
+
+  Execute(kinit_cmd)
+  Execute(daemon_cmd,
+          user = user,
+          not_if=service_is_up
+  )
+  if action == "stop":
+    File(pid_file,
+         action="delete",
+         ignore_failures=True
+    )
+
+
+def hdfs_directory(name=None, owner=None, group=None,
+                   mode=None, recursive_chown=False, recursive_chmod=False):
+  import params
+
+  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
+  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
+
+  stub_dir = params.namenode_dirs_created_stub_dir
+  stub_filename = params.namenode_dirs_stub_filename
+  dir_absent_in_stub = format(
+    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
+  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
+  tries = 3
+  try_sleep = 10
+  dfs_check_nn_status_cmd = "true"
+
+  if params.dfs_ha_enabled:
+    namenode_id = params.namenode_id
+    dfs_check_nn_status_cmd = format(
+      "hdfs haadmin -getServiceState $namenode_id | grep active > /dev/null")
+
+  #if params.stack_version[0] == "2":
+  mkdir_cmd = format("fs -mkdir -p {name}")
+  #else:
+  #  mkdir_cmd = format("fs -mkdir {name}")
+
+  if params.security_enabled:
+    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
+            user = params.hdfs_user)
+  ExecuteHadoop(mkdir_cmd,
+                try_sleep=try_sleep,
+                tries=tries,
+                not_if=format(
+                  "! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "{dir_exists} && ! {namenode_safe_mode_off}"),
+                only_if=format(
+                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "! {dir_exists}"),
+                conf_dir=params.hadoop_conf_dir,
+                user=params.hdfs_user
+  )
+  Execute(record_dir_in_stub,
+          user=params.hdfs_user,
+          only_if=format("{dir_absent_in_stub}")
+  )
+
+  recursive = "-R" if recursive_chown else ""
+  perm_cmds = []
+
+  if owner:
+    chown = owner
+    if group:
+      chown = format("{owner}:{group}")
+    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
+  if mode:
+    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
+  for cmd in perm_cmds:
+    ExecuteHadoop(cmd,
+                  user=params.hdfs_user,
+                  only_if=format("! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}"),
+                  try_sleep=try_sleep,
+                  tries=tries,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+
+
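
Typical call sites, as seen in the component scripts in this patch (argument values are illustrative):

    # start a daemon with pid/log dirs and kerberos credentials
    service(action="start", name="journalnode", user=params.hdfs_user,
            create_pid_dir=True, create_log_dir=True,
            keytab=params.dfs_journalnode_keytab_file,
            principal=params.dfs_journalnode_kerberos_principal)

    # create an HDFS directory exactly once, recording it in the stub file
    hdfs_directory(name="/apps/hbase/staging", owner=params.hbase_user,
                   mode="711")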

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000..1f9ba65
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/package/scripts/zkfc_slave.py
@@ -0,0 +1,62 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class ZkfcSlave(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    service(
+      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def config(self, env):
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.zkfc_pid_file)
+
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/configuration/hive-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..a69e250
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,267 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <description>Database name used as the Hive Metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+      <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value> </value>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value></value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value></value>
+    <description>The path to the Kerberos Keytab file containing the metastore
+     thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value></value>
+    <description>The service principal for the metastore thrift server. The special
+    string _HOST will be replaced automatically with the correct host name.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.semantic.analyzer.factory.impl</name>
+    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
+    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best-effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization that converts a common
+      join into a map join based on the input file size.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
+      the criteria for sort-merge join.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization that converts a common join into a map join based on the input file
+      size. If this parameter is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than the
+      specified size, the join is directly converted to a map join (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>1000000000</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of sizes for n-1 of the tables/partitions for an n-way join is smaller than this size, the join is directly
+      converted to a map join (there is no conditional task). The default is 10MB.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>1</value>
+    <description>Reduce deduplication merges two RSs by moving the key/parts/reducer-num of the child RS to the parent RS.
+      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very
+      slow, single-reducer MR job. The optimization will be disabled if the number of reducers is less than the specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <description>If hive.auto.convert.join is off, this parameter does not take
+      effect. If it is on, and there are map-join jobs followed by a map-reduce
+      job (e.g. a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      Size per reducer. The default is 1G, i.e. if the input size is 10G, it
+      will use 10 reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>
+    Whether to enable automatic use of indexes
+    </description>
+  </property>
+
+</configuration>
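
Stack scripts consume these values through Script.get_config(), the same way the HDFS params.py earlier in this patch does. For example (key taken from this file):

    config = Script.get_config()
    hive_metastore_uris = config['configurations']['hive-site']['hive.metastore.uris']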

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..15b95fa
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/metainfo.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <user>root</user>
+  <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management
+    service
+  </comment>
+  <version>0.11.0.2.0.5.0</version>
+
+  <components>
+    <component>
+      <name>HIVE_METASTORE</name>
+      <category>MASTER</category>
+    </component>
+    <component>
+      <name>HIVE_SERVER</name>
+      <category>MASTER</category>
+    </component>
+    <component>
+      <name>MYSQL_SERVER</name>
+      <category>MASTER</category>
+    </component>
+    <component>
+      <name>HIVE_CLIENT</name>
+      <category>CLIENT</category>
+    </component>
+  </components>
+  <configuration-dependencies>
+    <config-type>global</config-type>
+    <config-type>hive-site</config-type>
+  </configuration-dependencies>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/addMysqlUser.sh
new file mode 100644
index 0000000..8d31b91
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/addMysqlUser.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+mysqldbhost=$4
+myhostname=$(hostname -f)
+
+service $mysqldservice start
+echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
+if [ "$(mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep -c "$mysqldbuser")" -eq 0 ]; then
+  echo "Adding user $mysqldbuser@$myhostname";
+  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
+  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
+fi
+mysql -u root -e "flush privileges;"
+service $mysqldservice stop
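
Example invocation, with all four arguments illustrative: sh addMysqlUser.sh mysql hive hivepassword db01.example.com (MySQL service name, metastore db user, db password, db host).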

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hcatSmoke.sh
new file mode 100644
index 0000000..9e7b33f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hcatSmoke.sh
@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename}"
+;;
+
+esac
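
The HCatalog service check drives this script twice, roughly as sh hcatSmoke.sh hcatsmoke prepare before the test and sh hcatSmoke.sh hcatsmoke cleanup afterwards (the table name is illustrative).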

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveSmoke.sh
new file mode 100644
index 0000000..7e03524
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveSmoke.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveserver2.sql
new file mode 100644
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveserver2Smoke.sh
new file mode 100644
index 0000000..051a21e
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,31 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1 | grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi
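
A sketch of a typical invocation, assuming HiveServer2 listens on its
default port; the URL and path are illustrative, and hiveserver2.sql from
this commit is the natural second argument:

  sh hiveserver2Smoke.sh jdbc:hive2://localhost:10000 /tmp/hiveserver2.sql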

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/pigSmoke.sh
new file mode 100644
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate $0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/startHiveserver2.sh
new file mode 100644
index 0000000..fa90c2f
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/startHiveserver2.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
+echo $! > $3
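
The four positional arguments are the stdout log ($1), the stderr log
($2), the file to write the daemon's pid to ($3), and the config directory
exported as HIVE_CONF_DIR ($4); the blank hive.metastore.uris override
apparently forces a local rather than remote metastore connection. The
companion startMetastore.sh below follows the same argument convention. A
hypothetical invocation, with paths assumed for illustration:

  sh startHiveserver2.sh /var/log/hive/hiveserver2.out \
      /var/log/hive/hiveserver2.log /var/run/hive/hiveserver2.pid \
      /etc/hive/conf.server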

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/startMetastore.sh
new file mode 100644
index 0000000..9350776
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
+echo $! > $3

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/__init__.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat.py
new file mode 100644
index 0000000..2993d3a
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hcat():
+  import params
+
+  Directory(params.hcat_conf_dir,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            recursive=True
+  )
+
+  hcat_TemplateConfig('hcat-env.sh')
+
+
+def hcat_TemplateConfig(name):
+  import params
+
+  TemplateConfig(format("{hcat_conf_dir}/{name}"),
+                 owner=params.hcat_user,
+                 group=params.user_group
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat_client.py
new file mode 100644
index 0000000..8b5921a
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hcat import hcat
+
+class HCatClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+
+    hcat()
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..5112e99
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hcat_service_check():
+    import params
+
+    unique = get_unique_id_and_date()
+    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
+    File('/tmp/hcatSmoke.sh',
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir)
+
+    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True
+    )
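
The ExecuteHadoop verification above relies on "fs -test -e", which exits
0 only when the path exists, so the warehouse directory created by the
prepare step is the pass/fail signal. The equivalent manual check (the
unique id in the path is illustrative):

  hadoop fs -test -e /apps/hive/warehouse/hcatsmoke20140113 && echo "smoke table exists"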

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive.py b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive.py
new file mode 100644
index 0000000..b37ebb2
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hive(name=None):
+  import params
+
+  if name == 'metastore' or name == 'hiveserver2':
+    hive_config_dir = params.hive_server_conf_dir
+    config_file_mode = 0600
+    jdbc_connector()
+  else:
+    hive_config_dir = params.hive_conf_dir
+    config_file_mode = 0644
+
+  Directory(hive_config_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=config_file_mode
+  )
+
+  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
+               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
+
+  Execute(cmd,
+          not_if=format("[ -f {check_db_connection_jar_name} ]"))
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=StaticFile('startHiveserver2.sh')
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+  )
+
+  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
+  crt_file(format("{hive_conf_dir}/hive-exec-log4j.properties.template"))
+  crt_file(format("{hive_conf_dir}/hive-log4j.properties.template"))
+
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            recursive=True,
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+
+def jdbc_connector():
+  import params
+
+  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
+    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            path=["/bin", "usr/bin/"])
+
+  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+    cmd = format(
+      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
+      "cp {driver_curl_target} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            path=["/bin", "usr/bin/"])


[5/7] AMBARI-4270. Add decommission support for TaskTracker and modify support for DataNode to match

Posted by sm...@apache.org.
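
For orientation while reading the test changes below: with this patch,
decommission is no longer a standalone DECOMMISSION_DATANODE action driven
by an exclude-file config, but a DECOMMISSION custom command issued
against the master component that manages the slaves (NAMENODE for
DataNodes), carrying excluded_hosts/included_hosts and an optional
slave_type parameter. A sketch of the corresponding REST call; the
endpoint shape and payload field names are an assumption for illustration,
not taken from this commit:

  curl -u admin:admin -H 'X-Requested-By: ambari' -X POST -d '{
    "RequestInfo": {
      "command": "DECOMMISSION",
      "context": "Decommission DataNode",
      "parameters": {"slave_type": "DATANODE", "excluded_hosts": "h2"}
    },
    "Requests/resource_filters": [{"service_name": "HDFS", "component_name": "NAMENODE"}]
  }' http://ambari.example.com:8080/api/v1/clusters/c1/requests
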
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java b/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
index a31ed6e..55ef182 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
@@ -34,37 +34,37 @@ public class StackExtensionHelperTest {
     StackExtensionHelper helper = new StackExtensionHelper(stackRoot);
     helper.populateServicesForStack(stackInfo);
     List<ServiceInfo> services =  stackInfo.getServices();
-    assertEquals(services.size(), 2);
+    assertEquals(5, services.size());
     for (ServiceInfo serviceInfo : services) {
-      if (serviceInfo.getName().equals("YARN")) {
+      if (serviceInfo.getName().equals("HIVE")) {
         // Check old-style service
-        assertEquals("YARN", serviceInfo.getName());
+        assertEquals("HIVE", serviceInfo.getName());
         assertEquals("1.0", serviceInfo.getSchemaVersion());
-        assertEquals("mapred", serviceInfo.getUser());
-        assertTrue(serviceInfo.getComment().startsWith("Apache Hadoop NextGen"));
-        assertEquals("2.1.0.2.0.6.0", serviceInfo.getVersion());
+        assertEquals("root", serviceInfo.getUser());
+        assertTrue(serviceInfo.getComment().startsWith("Data warehouse system"));
+        assertEquals("0.11.0.2.0.5.0", serviceInfo.getVersion());
         // Check some component definitions
         List<ComponentInfo> components = serviceInfo.getComponents();
-        assertEquals("RESOURCEMANAGER", components.get(0).getName());
+        assertEquals("HIVE_METASTORE", components.get(0).getName());
         assertEquals("MASTER", components.get(0).getCategory());
         List<PropertyInfo> properties = serviceInfo.getProperties();
         // Check some property
-        assertEquals(4, properties.size());
+        assertEquals(35, properties.size());
         boolean found = false;
         for (PropertyInfo property : properties) {
-          if (property.getName().equals("yarn.resourcemanager.resource-tracker.address")) {
-            assertEquals("localhost:8025", property.getValue());
-            assertEquals("yarn-site.xml",
+          if (property.getName().equals("javax.jdo.option.ConnectionDriverName")) {
+            assertEquals("com.mysql.jdbc.Driver", property.getValue());
+            assertEquals("hive-site.xml",
                     property.getFilename());
-            assertEquals(true, property.isDeleted());
+            assertEquals(false, property.isDeleted());
             found = true;
           }
         }
         assertTrue("Property not found in a list of properties", found);
         // Check config dependencies
         List<String> configDependencies = serviceInfo.getConfigDependencies();
-        assertEquals(1, configDependencies.size());
-        assertEquals("core-site", configDependencies.get(0));
+        assertEquals(2, configDependencies.size());
+        assertEquals("hive-site", configDependencies.get(1));
       } else if (serviceInfo.getName().equals("HBASE")) {
         assertEquals("HBASE", serviceInfo.getName());
         assertEquals("HBASE", serviceInfo.getServiceMetadataFolder());
@@ -151,7 +151,11 @@ public class StackExtensionHelperTest {
         assertEquals("hbase-policy", configDependencies.get(1));
         assertEquals("hbase-site", configDependencies.get(2));
       } else {
-        fail("Unknown service");
+        if (!serviceInfo.getName().equals("YARN") &&
+            !serviceInfo.getName().equals("HDFS") &&
+            !serviceInfo.getName().equals("MAPREDUCE2")) {
+          fail("Unknown service");
+        }
       }
     }
   }
@@ -161,7 +165,7 @@ public class StackExtensionHelperTest {
     File stackRoot = new File(stackRootStr);
     StackExtensionHelper helper = new StackExtensionHelper(stackRoot);
     File legacyMetaInfoFile = new File("./src/test/resources/stacks/HDP/2.0.7/" +
-            "services/YARN/metainfo.xml".replaceAll("/", File.separator));
+            "services/HIVE/metainfo.xml".replaceAll("/", File.separator));
     String version = helper.getSchemaVersion(legacyMetaInfoFile);
     assertEquals("1.0", version);
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
index d6e59ee..7bfe71b 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
@@ -62,6 +62,7 @@ import org.apache.ambari.server.state.ConfigFactory;
 import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.state.ConfigImpl;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostState;
 import org.apache.ambari.server.state.RepositoryInfo;
 import org.apache.ambari.server.state.Service;
@@ -146,7 +147,7 @@ public class AmbariManagementControllerTest {
   private static final String INCORRECT_BASE_URL = "http://incorrect.url";
 
   private static final String COMPONENT_NAME = "NAMENODE";
-  
+
   private static final String REQUEST_CONTEXT_PROPERTY = "context";
 
   private static final String CLUSTER_HOST_INFO = "clusterHostInfo";
@@ -2058,8 +2059,7 @@ public class AmbariManagementControllerTest {
     s1.addServiceComponent(sc1);
     sc1.setDesiredState(State.UNINSTALLED);
     sc1.persist();
-    ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1",
-        false);
+    ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1");
     sc1.addServiceComponentHost(sch1);
     sch1.setDesiredState(State.INSTALLED);
     sch1.setState(State.INSTALLING);
@@ -2067,11 +2067,11 @@ public class AmbariManagementControllerTest {
     sch1.setStackVersion(new StackId("HDP-0.1"));
 
     sch1.persist();
-    
+
     sch1.updateActualConfigs(new HashMap<String, Map<String,String>>() {{
       put("global", new HashMap<String,String>() {{ put("tag", "version1"); }});
     }});
-    
+
 
     ServiceComponentHostRequest r =
         new ServiceComponentHostRequest(c1.getClusterName(),
@@ -2199,6 +2199,88 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals(1, resps.size());
   }
 
+  @Test
+  public void testServiceComponentHostsWithDecommissioned() throws AmbariException {
+
+    final String host1 = "h1";
+    final String host2 = "h2";
+    String clusterName = "foo1";
+    setupClusterWithHosts(clusterName, "HDP-2.0.7",
+        new ArrayList<String>() {{
+          add(host1);
+          add(host2);
+        }},
+        "centos5");
+    String serviceName = "HDFS";
+    createService(clusterName, serviceName, null);
+    String componentName1 = "NAMENODE";
+    String componentName2 = "DATANODE";
+    String componentName3 = "HDFS_CLIENT";
+
+    createServiceComponent(clusterName, serviceName, componentName1,
+        State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName2,
+        State.INIT);
+    createServiceComponent(clusterName, serviceName, componentName3,
+        State.INIT);
+
+    createServiceComponentHost(clusterName, serviceName, componentName1,
+        host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName2,
+        host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName3,
+        host1, null);
+    createServiceComponentHost(clusterName, serviceName, componentName2,
+        host2, null);
+    createServiceComponentHost(clusterName, serviceName, componentName3,
+        host2, null);
+
+    // Install
+    installService(clusterName, serviceName, false, false);
+
+    // Start
+    startService(clusterName, serviceName, false, false);
+
+    Service s1 = clusters.getCluster(clusterName).getService(serviceName);
+    s1.getServiceComponent(componentName2).getServiceComponentHost(host1).
+        setComponentAdminState(HostComponentAdminState.DECOMMISSIONED);
+    s1.getServiceComponent(componentName2).getServiceComponentHost(host2).
+        setComponentAdminState(HostComponentAdminState.INSERVICE);
+
+    ServiceComponentHostRequest r =
+        new ServiceComponentHostRequest(clusterName, null, null, null, null);
+    Set<ServiceComponentHostResponse> resps = controller.getHostComponents(Collections.singleton(r));
+    Assert.assertEquals(5, resps.size());
+
+    // Get all host components with adminState = DECOMMISSIONED
+    r = new ServiceComponentHostRequest(clusterName, null, null, null, null);
+    r.setAdminState("DECOMMISSIONED");
+    resps = controller.getHostComponents(Collections.singleton(r));
+    Assert.assertEquals(1, resps.size());
+
+    // Get all host components with adminState = INSERVICE
+    r = new ServiceComponentHostRequest(clusterName, null, null, null, null);
+    r.setAdminState("INSERVICE");
+    resps = controller.getHostComponents(Collections.singleton(r));
+    Assert.assertEquals(1, resps.size());
+
+    // Get all host components with an adminState that is not a valid value
+    r = new ServiceComponentHostRequest(clusterName, null, null, null, null);
+    r.setAdminState("INSTALLED");
+    resps = controller.getHostComponents(Collections.singleton(r));
+    Assert.assertEquals(0, resps.size());
+
+    //Update adminState
+    r = new ServiceComponentHostRequest(clusterName, "HDFS", "DATANODE", host2, null);
+    r.setAdminState("DECOMMISSIONED");
+    try {
+      controller.updateHostComponents(Collections.singleton(r), new HashMap<String, String>(), false);
+      Assert.fail("Must throw exception when decommission attribute is updated.");
+    } catch (IllegalArgumentException ex) {
+      Assert.assertTrue(ex.getMessage().contains("Property adminState cannot be modified through update"));
+    }
+  }
+
   private Cluster setupClusterWithHosts(String clusterName, String stackId, List<String> hosts,
                                         String osType) throws AmbariException {
     ClusterRequest r = new ClusterRequest(null, clusterName, stackId, null);
@@ -2254,18 +2336,12 @@ public class AmbariManagementControllerTest {
     sc2.persist();
     sc3.persist();
 
-    ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1",
-        false);
-    ServiceComponentHost sch2 = serviceComponentHostFactory.createNew(sc1, "h2",
-        false);
-    ServiceComponentHost sch3 = serviceComponentHostFactory.createNew(sc1, "h3",
-        false);
-    ServiceComponentHost sch4 = serviceComponentHostFactory.createNew(sc2, "h1",
-        false);
-    ServiceComponentHost sch5 = serviceComponentHostFactory.createNew(sc2, "h2",
-        false);
-    ServiceComponentHost sch6 = serviceComponentHostFactory.createNew(sc3, "h3",
-        false);
+    ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1");
+    ServiceComponentHost sch2 = serviceComponentHostFactory.createNew(sc1, "h2");
+    ServiceComponentHost sch3 = serviceComponentHostFactory.createNew(sc1, "h3");
+    ServiceComponentHost sch4 = serviceComponentHostFactory.createNew(sc2, "h1");
+    ServiceComponentHost sch5 = serviceComponentHostFactory.createNew(sc2, "h2");
+    ServiceComponentHost sch6 = serviceComponentHostFactory.createNew(sc3, "h3");
 
     sc1.addServiceComponentHost(sch1);
     sc1.addServiceComponentHost(sch2);
@@ -2519,7 +2595,7 @@ public class AmbariManagementControllerTest {
     ServiceRequest req1, req2;
     try {
       reqs.clear();
-      req1 = new ServiceRequest(clusterName1, serviceName1, 
+      req1 = new ServiceRequest(clusterName1, serviceName1,
           State.INSTALLED.toString());
       req2 = new ServiceRequest(clusterName2, serviceName2,
           State.INSTALLED.toString());
@@ -2669,7 +2745,7 @@ public class AmbariManagementControllerTest {
     sch4.setState(State.INSTALLED);
     sch5.setState(State.INSTALLED);
     sch6.setState(State.INSTALLED);
-    
+
     Set<ServiceRequest> reqs = new HashSet<ServiceRequest>();
     ServiceRequest req1, req2;
     try {
@@ -2727,7 +2803,7 @@ public class AmbariManagementControllerTest {
     sch5.setState(State.INSTALLED);
 
     reqs.clear();
-    req1 = new ServiceRequest(clusterName, serviceName1, 
+    req1 = new ServiceRequest(clusterName, serviceName1,
         State.STARTED.toString());
     req2 = new ServiceRequest(clusterName, serviceName2,
         State.STARTED.toString());
@@ -2788,7 +2864,7 @@ public class AmbariManagementControllerTest {
     Assert.assertNull(stage1.getExecutionCommandWrapper(host2, "DATANODE"));
     Assert.assertNotNull(stage3.getExecutionCommandWrapper(host1, "HBASE_SERVICE_CHECK"));
     Assert.assertNotNull(stage2.getExecutionCommandWrapper(host2, "HDFS_SERVICE_CHECK"));
-    
+
     for (Stage s : stages) {
       for (List<ExecutionCommandWrapper> list : s.getExecutionCommands().values()) {
         for (ExecutionCommandWrapper ecw : list) {
@@ -3652,15 +3728,15 @@ public class AmbariManagementControllerTest {
   @SuppressWarnings("serial")
   @Test
   public void testCreateActionsFailures() throws Exception {
-    setupClusterWithHosts("c1", "HDP-0.1",
+    setupClusterWithHosts("c1", "HDP-2.0.7",
         new ArrayList<String>() {{
           add("h1");
         }},
         "centos5");
 
     Cluster cluster = clusters.getCluster("c1");
-    cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-    cluster.setCurrentStackVersion(new StackId("HDP-0.1"));
+    cluster.setDesiredStackVersion(new StackId("HDP-2.0.7"));
+    cluster.setCurrentStackVersion(new StackId("HDP-2.0.7"));
 
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
     Config config1 = cf.createNew(cluster, "global",
@@ -3683,14 +3759,14 @@ public class AmbariManagementControllerTest {
     Service hdfs = cluster.addService("HDFS");
     hdfs.persist();
 
-    Service mapred = cluster.addService("MAPREDUCE");
-    mapred.persist();
+    Service hive = cluster.addService("HIVE");
+    hive.persist();
 
     hdfs.addServiceComponent(Role.HDFS_CLIENT.name()).persist();
     hdfs.addServiceComponent(Role.NAMENODE.name()).persist();
     hdfs.addServiceComponent(Role.DATANODE.name()).persist();
 
-    mapred.addServiceComponent(Role.MAPREDUCE_CLIENT.name()).persist();
+    hive.addServiceComponent(Role.HIVE_SERVER.name()).persist();
 
     hdfs.getServiceComponent(Role.HDFS_CLIENT.name()).addServiceComponentHost("h1").persist();
     hdfs.getServiceComponent(Role.NAMENODE.name()).addServiceComponentHost("h1").persist();
@@ -3710,23 +3786,49 @@ public class AmbariManagementControllerTest {
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
         "Unsupported action");
 
-    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION_DATANODE", "MAPREDUCE", params);
+    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION_DATANODE", "HDFS", params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
-        "Unsupported action DECOMMISSION_DATANODE for MAPREDUCE");
+        "Unsupported action DECOMMISSION_DATANODE for Service: HDFS and Component: null");
 
-    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION_DATANODE", "HDFS", params);
+    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION", "HDFS", params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
-        "No exclude file specified when decommissioning datanodes");
+        "Unsupported action DECOMMISSION for Service: HDFS and Component: null");
 
-    params.put("excludeFileTag", "tag1");
-    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION_DATANODE", "HDFS", params);
+    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION", null, "HDFS", "HDFS_CLIENT", null, params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
-        "Decommissioning datanodes requires the cluster");
+        "Unsupported action DECOMMISSION for Service: HDFS and Component: HDFS_CLIENT");
 
     actionRequest = new ExecuteActionRequest("c1", null, "DECOMMISSION_DATANODE", "HDFS", null, null, params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
         "Action DECOMMISSION_DATANODE does not exist");
 
+    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION", null, "YARN", "RESOURCEMANAGER", null, params);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Service not found, clusterName=c1, serviceName=YARN");
+
+    Map<String, String> params2 = new HashMap<String, String>() {{
+      put("included_hosts", "h1,h2");
+      put("excluded_hosts", "h1,h3");
+    }};
+    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION", null, "HDFS", "NAMENODE", null, params2);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Same host cannot be specified for inclusion as well as exclusion. Hosts: [h1]");
+
+    params2 = new HashMap<String, String>() {{
+      put("included_hosts", " h1,h2");
+      put("excluded_hosts", "h4, h3");
+      put("slave_type", "HDFS_CLIENT");
+    }};
+    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION", null, "HDFS", "NAMENODE", null, params2);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Component HDFS_CLIENT is not supported for decommissioning.");
+
+    List<String> hosts = new ArrayList<String>();
+    hosts.add("h6");
+    actionRequest = new ExecuteActionRequest("c1", "DECOMMISSION", null, "HDFS", "NAMENODE", hosts, params2);
+    expectActionCreationErrorWithMessage(actionRequest, requestProperties,
+        "Decommission command cannot be issued with target host(s) specified.");
+
     controller.getActionManager().createActionDefinition(
         "a1", ActionType.SYSTEM, "test,dirName", "Does file exist", "", "",
         TargetHostType.SPECIFIC, Short.valueOf("100"));
@@ -3736,11 +3838,11 @@ public class AmbariManagementControllerTest {
         TargetHostType.ANY, Short.valueOf("100"));
 
     controller.getActionManager().createActionDefinition(
-        "a3", ActionType.SYSTEM, "", "Does file exist", "YARN", "NODEMANAGER",
+        "a3", ActionType.SYSTEM, "", "Does file exist", "MAPREDUCE", "MAPREDUCE_CLIENT",
         TargetHostType.ANY, Short.valueOf("100"));
 
     controller.getActionManager().createActionDefinition(
-        "a4", ActionType.SYSTEM, "", "Does file exist", "MAPREDUCE", "",
+        "a4", ActionType.SYSTEM, "", "Does file exist", "HIVE", "",
         TargetHostType.ANY, Short.valueOf("100"));
 
     actionRequest = new ExecuteActionRequest("c1", null, "a1", null, null, null, null);
@@ -3756,9 +3858,9 @@ public class AmbariManagementControllerTest {
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
         "Action a1 requires explicit target host(s)");
 
-    actionRequest = new ExecuteActionRequest("c1", null, "a2", "MAPREDUCE", null, null, params);
+    actionRequest = new ExecuteActionRequest("c1", null, "a2", "HIVE", null, null, params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
-        "Action a2 targets service MAPREDUCE that does not match with expected HDFS");
+        "Action a2 targets service HIVE that does not match with expected HDFS");
 
     actionRequest = new ExecuteActionRequest("c1", null, "a2", "HDFS", "HDFS_CLIENT", null, params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
@@ -3776,19 +3878,20 @@ public class AmbariManagementControllerTest {
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
         "Action a1 targets component HDFS_CLIENT2 without specifying the target service");
 
+    // targets a service that is not a member of the stack (e.g. MR not in HDP-2)
     actionRequest = new ExecuteActionRequest("c1", null, "a3", "", "", null, params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
-        "Action a3 targets service YARN that does not exist");
+        "Action a3 targets service MAPREDUCE that does not exist");
 
-    List<String> hosts = new ArrayList<String>();
+    hosts = new ArrayList<String>();
     hosts.add("h6");
     actionRequest = new ExecuteActionRequest("c1", null, "a2", "", "", hosts, params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
         "Request specifies host h6 but its not a valid host based on the target service=HDFS and component=DATANODE");
 
-    actionRequest = new ExecuteActionRequest("c1", null, "a4", "MAPREDUCE", "", null, params);
+    actionRequest = new ExecuteActionRequest("c1", null, "a4", "HIVE", "", null, params);
     expectActionCreationErrorWithMessage(actionRequest, requestProperties,
-        "Suitable hosts not found, component=, service=MAPREDUCE, cluster=c1, actionName=a4");
+        "Suitable hosts not found, component=, service=HIVE, cluster=c1, actionName=a4");
 
   }
 
@@ -3816,7 +3919,7 @@ public class AmbariManagementControllerTest {
     Cluster cluster = clusters.getCluster("c1");
     cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
     cluster.setCurrentStackVersion(new StackId("HDP-0.1"));
-    
+
     ConfigFactory cf = injector.getInstance(ConfigFactory.class);
     Config config1 = cf.createNew(cluster, "global",
         new HashMap<String, String>(){{ put("key1", "value1"); }});
@@ -3825,12 +3928,12 @@ public class AmbariManagementControllerTest {
     Config config2 = cf.createNew(cluster, "core-site",
         new HashMap<String, String>(){{ put("key1", "value1"); }});
     config2.setVersionTag("version1");
-    
+
     cluster.addConfig(config1);
     cluster.addConfig(config2);
     cluster.addDesiredConfig("_test", config1);
     cluster.addDesiredConfig("_test", config2);
-    
+
     Service hdfs = cluster.addService("HDFS");
     Service mapReduce = cluster.addService("MAPREDUCE");
     hdfs.persist();
@@ -3851,7 +3954,7 @@ public class AmbariManagementControllerTest {
     requestProperties.put(REQUEST_CONTEXT_PROPERTY, "Called from a test");
 
     RequestStatusResponse response = controller.createAction(actionRequest, requestProperties);
-    
+
     assertEquals(1, response.getTasks().size());
     ShortTaskStatus task = response.getTasks().get(0);
 
@@ -5409,7 +5512,7 @@ public class AmbariManagementControllerTest {
       clusterName, serviceName, State.STARTED.name());
     Set<ServiceRequest> setReqs = new HashSet<ServiceRequest>();
     setReqs.add(sr);
-    RequestStatusResponse resp = ServiceResourceProviderTest.updateServices(controller, 
+    RequestStatusResponse resp = ServiceResourceProviderTest.updateServices(controller,
       setReqs, Collections.<String, String>emptyMap(), false, true);
 
     Assert.assertNotNull(resp);
@@ -5512,7 +5615,7 @@ public class AmbariManagementControllerTest {
     String clusterName = "foo1";
     createCluster(clusterName);
     clusters.getCluster(clusterName)
-      .setDesiredStackVersion(new StackId("HDP-0.1"));
+      .setDesiredStackVersion(new StackId("HDP-2.0.7"));
     String serviceName = "HDFS";
     createService(clusterName, serviceName, null);
     String componentName1 = "NAMENODE";
@@ -5558,19 +5661,12 @@ public class AmbariManagementControllerTest {
     Map<String, String> configs = new HashMap<String, String>();
     configs.put("a", "b");
 
-    ConfigurationRequest cr1,cr2;
+    ConfigurationRequest cr1;
     cr1 = new ConfigurationRequest(clusterName, "hdfs-site","version1",
       configs);
     ClusterRequest crReq = new ClusterRequest(null, clusterName, null, null);
     crReq.setDesiredConfig(cr1);
     controller.updateClusters(Collections.singleton(crReq), null);
-    Map<String, String> props = new HashMap<String, String>();
-    props.put("datanodes", host2);
-    cr2 = new ConfigurationRequest(clusterName, "hdfs-exclude-file", "tag1",
-      props);
-    crReq = new ClusterRequest(null, clusterName, null, null);
-    crReq.setDesiredConfig(cr2);
-    controller.updateClusters(Collections.singleton(crReq), null);
 
     // Start
     startService(clusterName, serviceName, false, false);
@@ -5578,12 +5674,16 @@ public class AmbariManagementControllerTest {
     Cluster cluster = clusters.getCluster(clusterName);
     Service s = cluster.getService(serviceName);
     Assert.assertEquals(State.STARTED, s.getDesiredState());
+    ServiceComponentHost scHost = s.getServiceComponent("DATANODE").getServiceComponentHost("h2");
+    Assert.assertEquals(HostComponentAdminState.INSERVICE, scHost.getComponentAdminState());
 
+    // Decommission one datanode
     Map<String, String> params = new HashMap<String, String>(){{
       put("test", "test");
-      put("excludeFileTag", "tag1");
+      put("excluded_hosts", "h2");
     }};
-    ExecuteActionRequest request = new ExecuteActionRequest(clusterName, Role.DECOMMISSION_DATANODE.name(), "HDFS", params);
+    ExecuteActionRequest request = new ExecuteActionRequest(clusterName, "DECOMMISSION", null, "HDFS", "NAMENODE",
+        null, params);
 
     Map<String, String> requestProperties = new HashMap<String, String>();
     requestProperties.put(REQUEST_CONTEXT_PROPERTY, "Called from a test");
@@ -5597,9 +5697,77 @@ public class AmbariManagementControllerTest {
     Assert.assertNotNull(storedTasks);
     Assert.assertNotNull(execCmd.getConfigurationTags().get("hdfs-site"));
     Assert.assertEquals(1, storedTasks.size());
-    Assert.assertEquals(host2, execCmd.getConfigurations().get
-        ("hdfs-exclude-file").get("datanodes"));
-    Assert.assertNotNull(execCmd.getConfigurationTags().get("hdfs-exclude-file"));
+    Assert.assertEquals(HostComponentAdminState.DECOMMISSIONED, scHost.getComponentAdminState());
+    HostRoleCommand command =  storedTasks.get(0);
+    Assert.assertEquals(Role.NAMENODE, command.getRole());
+    Assert.assertEquals(RoleCommand.CUSTOM_COMMAND, command.getRoleCommand());
+    Map<String, Set<String>> cInfo = execCmd.getClusterHostInfo();
+    Assert.assertTrue(cInfo.containsKey("decom_dn_hosts"));
+    Assert.assertEquals(1, cInfo.get("decom_dn_hosts").size());
+    Assert.assertEquals("h2",
+        cInfo.get("all_hosts").toArray()[Integer.parseInt(cInfo.get("decom_dn_hosts").iterator().next())]);
+    Assert.assertEquals("DECOMMISSION", execCmd.getHostLevelParams().get("custom_command"));
+
+    // Decommission the other datanode
+    params = new HashMap<String, String>(){{
+      put("test", "test");
+      put("excluded_hosts", "h1");
+    }};
+    request = new ExecuteActionRequest(clusterName, "DECOMMISSION", null, "HDFS", "NAMENODE", null, params);
+
+    response = controller.createAction(request,
+        requestProperties);
+
+    storedTasks = actionDB.getRequestTasks(response.getRequestId());
+    execCmd = storedTasks.get(0).getExecutionCommandWrapper
+        ().getExecutionCommand();
+    Assert.assertNotNull(storedTasks);
+    Assert.assertEquals(1, storedTasks.size());
+    Assert.assertEquals(HostComponentAdminState.DECOMMISSIONED, scHost.getComponentAdminState());
+    cInfo = execCmd.getClusterHostInfo();
+    Assert.assertTrue(cInfo.containsKey("decom_dn_hosts"));
+    Assert.assertEquals("0,1", cInfo.get("decom_dn_hosts").iterator().next());
+    Assert.assertEquals("DECOMMISSION", execCmd.getHostLevelParams().get("custom_command"));
+
+    // Recommission the other datanode  (while adding NN HA)
+    createServiceComponentHost(clusterName, serviceName, componentName1,
+        host2, null);
+    ServiceComponentHostRequest r = new ServiceComponentHostRequest(clusterName, serviceName,
+        componentName1, host2, State.INSTALLED.toString());
+    Set<ServiceComponentHostRequest> requests = new HashSet<ServiceComponentHostRequest>();
+    requests.add(r);
+    controller.updateHostComponents(requests, Collections.<String, String>emptyMap(), true);
+    s.getServiceComponent(componentName1).getServiceComponentHost(host2).setState(State.INSTALLED);
+    r = new ServiceComponentHostRequest(clusterName, serviceName,
+        componentName1, host2, State.STARTED.toString());
+    requests.clear();
+    requests.add(r);
+    controller.updateHostComponents(requests, Collections.<String, String>emptyMap(), true);
+    s.getServiceComponent(componentName1).getServiceComponentHost(host2).setState(State.STARTED);
+
+    params = new HashMap<String, String>(){{
+      put("test", "test");
+      put("included_hosts", "h1 , h2");
+    }};
+    request = new ExecuteActionRequest(clusterName, "DECOMMISSION", null, "HDFS", "NAMENODE", null, params);
+
+    response = controller.createAction(request,
+        requestProperties);
+
+    storedTasks = actionDB.getRequestTasks(response.getRequestId());
+    Assert.assertNotNull(storedTasks);
+    scHost = s.getServiceComponent("DATANODE").getServiceComponentHost("h2");
+    Assert.assertEquals(HostComponentAdminState.INSERVICE, scHost.getComponentAdminState());
+    execCmd = storedTasks.get(0).getExecutionCommandWrapper
+        ().getExecutionCommand();
+    Assert.assertNotNull(storedTasks);
+    Assert.assertEquals(2, storedTasks.size());
+    cInfo = execCmd.getClusterHostInfo();
+    Assert.assertFalse(cInfo.containsKey("decom_dn_hosts"));
+
+    // Slave components will have admin state as INSERVICE even if the state in DB is null
+    scHost.setComponentAdminState(null);
+    Assert.assertEquals(HostComponentAdminState.INSERVICE, scHost.getComponentAdminState());
   }
 
   @Test
@@ -5683,7 +5851,7 @@ public class AmbariManagementControllerTest {
     }
     Assert.assertEquals(true, serviceCheckFound);
   }
-  
+
   @Test
   @Ignore("Unsuported feature !")
   public void testConfigsAttachedToServiceNotCluster() throws AmbariException {
@@ -5696,7 +5864,7 @@ public class AmbariManagementControllerTest {
     String componentName1 = "NAMENODE";
     String componentName2 = "DATANODE";
     String componentName3 = "HDFS_CLIENT";
-    
+
     createServiceComponent(clusterName, serviceName, componentName1,
       State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2,
@@ -5740,11 +5908,11 @@ public class AmbariManagementControllerTest {
       configs);
     cr2 = new ConfigurationRequest(clusterName, "hdfs-site","version1",
       configs);
-    
+
     // create, but don't assign
     controller.createConfiguration(cr1);
     controller.createConfiguration(cr2);
-    
+
     Map<String,String> configVersions = new HashMap<String,String>() {{
       put("core-site", "version1");
       put("hdfs-site", "version1");
@@ -5756,7 +5924,7 @@ public class AmbariManagementControllerTest {
     installService(clusterName, serviceName, false, false);
     // Start
     long requestId = startService(clusterName, serviceName, true, false);
-    
+
     Assert.assertEquals(0, clusters.getCluster(clusterName).getDesiredConfigs().size());
 
     List<Stage> stages = actionDB.getAllStages(requestId);
@@ -6015,7 +6183,7 @@ public class AmbariManagementControllerTest {
     String clusterName = "foo1";
     createCluster(clusterName);
     clusters.getCluster(clusterName)
-      .setDesiredStackVersion(new StackId("HDP-0.1"));
+        .setDesiredStackVersion(new StackId("HDP-2.0.7"));
     String serviceName = "HDFS";
     createService(clusterName, serviceName, null);
     String componentName1 = "NAMENODE";
@@ -6023,11 +6191,11 @@ public class AmbariManagementControllerTest {
     String componentName3 = "HDFS_CLIENT";
 
     createServiceComponent(clusterName, serviceName, componentName1,
-      State.INIT);
+        State.INIT);
     createServiceComponent(clusterName, serviceName, componentName2,
-      State.INIT);
+        State.INIT);
     createServiceComponent(clusterName, serviceName, componentName3,
-      State.INIT);
+        State.INIT);
 
     String host1 = "h1";
     clusters.addHost(host1);
@@ -6044,15 +6212,15 @@ public class AmbariManagementControllerTest {
     clusters.mapHostToCluster(host2, clusterName);
 
     createServiceComponentHost(clusterName, serviceName, componentName1,
-      host1, null);
+        host1, null);
     createServiceComponentHost(clusterName, serviceName, componentName2,
-      host1, null);
+        host1, null);
     createServiceComponentHost(clusterName, serviceName, componentName2,
-      host2, null);
+        host2, null);
     createServiceComponentHost(clusterName, serviceName, componentName3,
-      host1, null);
+        host1, null);
     createServiceComponentHost(clusterName, serviceName, componentName3,
-      host2, null);
+        host2, null);
 
     // Install
     installService(clusterName, serviceName, false, false);
@@ -6061,19 +6229,12 @@ public class AmbariManagementControllerTest {
     Map<String, String> configs = new HashMap<String, String>();
     configs.put("a", "b");
 
-    ConfigurationRequest cr1,cr2;
-    cr1 = new ConfigurationRequest(clusterName, "hdfs-site","version1",
-      configs);
+    ConfigurationRequest cr1, cr2;
+    cr1 = new ConfigurationRequest(clusterName, "hdfs-site", "version1",
+        configs);
     ClusterRequest crReq = new ClusterRequest(null, clusterName, null, null);
     crReq.setDesiredConfig(cr1);
     controller.updateClusters(Collections.singleton(crReq), null);
-    Map<String, String> props = new HashMap<String, String>();
-    props.put("datanodes", host2);
-    cr2 = new ConfigurationRequest(clusterName, "hdfs-exclude-file", "tag1",
-      props);
-    crReq = new ClusterRequest(null, clusterName, null, null);
-    crReq.setDesiredConfig(cr2);
-    controller.updateClusters(Collections.singleton(crReq), null);
 
     // Start
     startService(clusterName, serviceName, false, false);
@@ -6086,8 +6247,14 @@ public class AmbariManagementControllerTest {
     config.setProperties(configs);
     config.setVersionTag("version122");
     Long groupId = createConfigGroup(clusters.getCluster(clusterName), "g1", "t1",
-      new ArrayList<String>() {{ add("h1"); add("h2"); }},
-      new ArrayList<Config>() {{ add(config); }});
+        new ArrayList<String>() {{
+          add("h1");
+          add("h2");
+        }},
+        new ArrayList<Config>() {{
+          add(config);
+        }}
+    );
 
     Assert.assertNotNull(groupId);
 
@@ -6095,28 +6262,34 @@ public class AmbariManagementControllerTest {
     Service s = cluster.getService(serviceName);
     Assert.assertEquals(State.STARTED, s.getDesiredState());
 
-    Map<String, String> params = new HashMap<String, String>(){{
+    Map<String, String> params = new HashMap<String, String>() {{
       put("test", "test");
-      put("excludeFileTag", "tag1");
+      put("excluded_hosts", " h1 ");
     }};
-    ExecuteActionRequest request = new ExecuteActionRequest(clusterName, Role.DECOMMISSION_DATANODE.name(), "HDFS", params);
+    ExecuteActionRequest request = new ExecuteActionRequest(clusterName, "DECOMMISSION", null,
+        "HDFS", "NAMENODE", null, params);
 
     Map<String, String> requestProperties = new HashMap<String, String>();
     requestProperties.put(REQUEST_CONTEXT_PROPERTY, "Called from a test");
 
     RequestStatusResponse response = controller.createAction(request,
-      requestProperties);
+        requestProperties);
 
     List<HostRoleCommand> storedTasks = actionDB.getRequestTasks(response.getRequestId());
     ExecutionCommand execCmd = storedTasks.get(0).getExecutionCommandWrapper
-      ().getExecutionCommand();
+        ().getExecutionCommand();
     Assert.assertNotNull(storedTasks);
     Assert.assertNotNull(execCmd.getConfigurationTags().get("hdfs-site"));
     Assert.assertEquals(1, storedTasks.size());
-    Assert.assertEquals(host2, execCmd.getConfigurations().get
-      ("hdfs-exclude-file").get("datanodes"));
-    Assert.assertNotNull(execCmd.getConfigurationTags().get("hdfs-exclude-file"));
-    Assert.assertEquals("c", execCmd.getConfigurations().get("hdfs-site").get("a"));
+    HostRoleCommand command =  storedTasks.get(0);
+    Assert.assertEquals(Role.NAMENODE, command.getRole());
+    Assert.assertEquals(RoleCommand.CUSTOM_COMMAND, command.getRoleCommand());
+    Map<String, Set<String>> cInfo = execCmd.getClusterHostInfo();
+    Assert.assertTrue(cInfo.containsKey("decom_dn_hosts"));
+    Assert.assertTrue(cInfo.get("decom_dn_hosts").size() == 1);
+    Assert.assertEquals("h1",
+        cInfo.get("all_hosts").toArray()[Integer.parseInt(cInfo.get("decom_dn_hosts").iterator().next())]);
+    Assert.assertEquals("DECOMMISSION", execCmd.getHostLevelParams().get("custom_command"));
   }
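
A note on the assertions above: the compressed clusterHostInfo map records decom_dn_hosts as indices into the all_hosts list rather than as literal hostnames, which is why the test parses the entry as an integer. A minimal decoding sketch, assuming plain per-host indices (variable names are illustrative):

  // Resolve decommissioned DataNode hostnames from the compressed
  // clusterHostInfo carried by the execution command.
  List<String> allHosts = new ArrayList<String>(cInfo.get("all_hosts"));
  Set<String> decommissioned = new HashSet<String>();
  for (String index : cInfo.get("decom_dn_hosts")) {
    decommissioned.add(allHosts.get(Integer.parseInt(index)));
  }
  // For the request above, decommissioned ends up containing only "h1".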
 
   @Test
@@ -6320,7 +6493,7 @@ public class AmbariManagementControllerTest {
       Assert.assertEquals(responseWithParams.getServiceName(), SERVICE_NAME);
       Assert.assertTrue(responseWithParams.getConfigTypes().size() > 0);
     }
-    
+
 
     StackServiceRequest invalidRequest = new StackServiceRequest(STACK_NAME, NEW_STACK_VERSION, NON_EXT_VALUE);
     try {
@@ -7233,9 +7406,9 @@ public class AmbariManagementControllerTest {
     Assert.assertNotNull(clusters.getCluster(clusterName).getService(serviceName)
       .getServiceComponent(componentName3)
       .getServiceComponentHost(host2));
-    
-    
-    
+
+
+
     // Install
     ServiceRequest r = new ServiceRequest(clusterName, serviceName, State.INSTALLED.toString());
     Set<ServiceRequest> requests = new HashSet<ServiceRequest>();
@@ -7245,17 +7418,17 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals(State.INSTALLED,
       clusters.getCluster(clusterName).getService(serviceName)
         .getDesiredState());
-    
+
     // set host components on host1 to INSTALLED
     for (ServiceComponentHost sch : clusters.getCluster(clusterName).getServiceComponentHosts(host1)) {
       sch.setState(State.INSTALLED);
     }
-    
+
     // set the host components on host2 to UNKNOWN state to simulate a lost host
     for (ServiceComponentHost sch : clusters.getCluster(clusterName).getServiceComponentHosts(host2)) {
       sch.setState(State.UNKNOWN);
     }
-    
+
     // issue an installed state request without failure
     ServiceComponentHostRequest schr = new ServiceComponentHostRequest(clusterName, "HDFS", "DATANODE", host2, "INSTALLED");
     controller.updateHostComponents(Collections.singleton(schr), new HashMap<String,String>(), false);
@@ -7266,14 +7439,14 @@ public class AmbariManagementControllerTest {
     }
 
   }
-  
+
   @Test
   public void testServiceUpdateRecursiveBadHostComponent() throws Exception {
     String clusterName = "foo1";
     createCluster(clusterName);
     clusters.getCluster(clusterName)
         .setDesiredStackVersion(new StackId("HDP-0.2"));
-    
+
     String serviceName1 = "HDFS";
     createService(clusterName, serviceName1, null);
 
@@ -7323,7 +7496,7 @@ public class AmbariManagementControllerTest {
     sch1.setState(State.INSTALLED);
     sch2.setState(State.UNKNOWN);
     sch3.setState(State.INSTALLED);
-    
+
     // an UNKNOWN failure will throw an exception
     ServiceRequest req = new ServiceRequest(clusterName, serviceName1,
         State.INSTALLED.toString());
@@ -7349,26 +7522,26 @@ public class AmbariManagementControllerTest {
       assertFalse(INCORRECT_BASE_URL.equals(repositoryInfo.getBaseUrl()));
     }
   }
-  
+
   @Test
   public void testUpdateRepoUrl() throws Exception {
     String INCORRECT_URL_2 = "http://bar.com/foo";
-    
+
     RepositoryInfo repo = ambariMetaInfo.getRepository(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID);
     assertNotNull(repo);
     assertNotNull(repo.getBaseUrl());
-    
+
     String original = repo.getBaseUrl();
-    
+
     repo = ambariMetaInfo.getRepository(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID);
     assertEquals(original, repo.getBaseUrl());
-    
+
     ambariMetaInfo.updateRepoBaseURL(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID, INCORRECT_BASE_URL);
-    
+
     repo = ambariMetaInfo.getRepository(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID);
     assertEquals(INCORRECT_BASE_URL, repo.getBaseUrl());
     assertEquals(original, repo.getDefaultBaseUrl());
-    
+
     ambariMetaInfo.updateRepoBaseURL(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID, INCORRECT_URL_2);
     repo = ambariMetaInfo.getRepository(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID);
     assertFalse(INCORRECT_BASE_URL.equals(repo.getBaseUrl()));
@@ -7379,35 +7552,35 @@ public class AmbariManagementControllerTest {
     AmbariMetaInfo ami = new AmbariMetaInfo(new File("src/test/resources/stacks"), new File("target/version"));
     injector.injectMembers(ami);
     ami.init();
-    
+
     repo = ami.getRepository(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID);
     assertEquals(INCORRECT_URL_2, repo.getBaseUrl());
     assertNotNull(repo.getDefaultBaseUrl());
     assertEquals(original, repo.getDefaultBaseUrl());
-    
+
     ami.updateRepoBaseURL(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID, original);
     repo = ami.getRepository(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID);
     assertEquals(original, repo.getBaseUrl());
     assertEquals(original, repo.getDefaultBaseUrl());
   }
-  
+
   @Ignore
   public void testUpdateRepoUrlController() throws Exception {
     RepositoryInfo repo = ambariMetaInfo.getRepository(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID);
-    
+
     RepositoryRequest request = new RepositoryRequest(STACK_NAME, STACK_VERSION, OS_TYPE, REPO_ID);
     request.setBaseUrl("http://hortonworks.com");
-    
+
     Set<RepositoryRequest> requests = new HashSet<RepositoryRequest>();
     requests.add(request);
-    
+
     // test bad url
     try {
       controller.updateRespositories(requests);
       fail ("Expected exception on invalid url");
     } catch (Exception e) {
     }
-    
+
     // test bad url, but allow to set anyway
     request.setVerifyBaseUrl(false);
     controller.updateRespositories(requests);
@@ -7424,7 +7597,7 @@ public class AmbariManagementControllerTest {
     String baseUrl = repo.getDefaultBaseUrl();
     if (!baseUrl.endsWith("/"))
       baseUrl += "/";
-    
+
     // variation #1: url with trailing slash, suffix preceding slash
     backingProperties.setProperty(Configuration.REPO_SUFFIX_KEY, "/repodata/repomd.xml");
     Assert.assertTrue(baseUrl.endsWith("/") && configuration.getRepoValidationSuffixes()[0].startsWith("/"));
@@ -7440,26 +7613,26 @@ public class AmbariManagementControllerTest {
     Assert.assertEquals(baseUrl, repo.getBaseUrl());
 
     baseUrl = baseUrl.substring(0, baseUrl.length()-1);
-    // variation #3: url with no trailing slash, suffix no prededing slash    
+    // variation #3: url with no trailing slash, suffix no preceding slash
     Assert.assertTrue(!baseUrl.endsWith("/") && !configuration.getRepoValidationSuffixes()[0].startsWith("/"));
     request.setBaseUrl(baseUrl);
     controller.updateRespositories(requests);
     Assert.assertEquals(baseUrl, repo.getBaseUrl());
-    
+
     // variation #4: url with no trailing slash, suffix preceding slash
     backingProperties.setProperty(Configuration.REPO_SUFFIX_KEY, "/repodata/repomd.xml");
     Assert.assertTrue(!baseUrl.endsWith("/") && configuration.getRepoValidationSuffixes()[0].startsWith("/"));
     request.setBaseUrl(baseUrl);
     controller.updateRespositories(requests);
     Assert.assertEquals(baseUrl, repo.getBaseUrl());
-    
+
     // variation #5: multiple suffix tests
     backingProperties.setProperty(Configuration.REPO_SUFFIX_KEY, "/foo/bar.xml,/repodata/repomd.xml");
     Assert.assertTrue(configuration.getRepoValidationSuffixes().length > 1);
     request.setBaseUrl(baseUrl);
     controller.updateRespositories(requests);
     Assert.assertEquals(baseUrl, repo.getBaseUrl());
-    
+
   }
 
   @Test
@@ -7550,12 +7723,12 @@ public class AmbariManagementControllerTest {
   @Test
   public void testDeleteHost() throws Exception {
     String clusterName = "foo1";
-    
+
     createCluster(clusterName);
-    
+
     Cluster cluster = clusters.getCluster(clusterName);
     cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-    
+
     String serviceName = "HDFS";
     createService(clusterName, serviceName, null);
     String componentName1 = "NAMENODE";
@@ -7570,12 +7743,12 @@ public class AmbariManagementControllerTest {
     clusters.addHost(host1);
     clusters.getHost("h1").setOsType("centos5");
     clusters.getHost("h1").persist();
-    
+
     String host2 = "h2";
     clusters.addHost(host2);
     clusters.getHost("h2").setOsType("centos6");
     clusters.getHost("h2").persist();
-    
+
     String host3 = "h3";
 
     clusters.mapHostToCluster(host1, clusterName);
@@ -7583,10 +7756,10 @@ public class AmbariManagementControllerTest {
     createServiceComponentHost(clusterName, null, componentName1, host1, null);
     createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
     createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);
-    
+
     // Install
     installService(clusterName, serviceName, false, false);
-    
+
     // make them believe they are up
     Map<String, ServiceComponentHost> hostComponents = cluster.getService(serviceName).getServiceComponent(componentName1).getServiceComponentHosts();
     for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
@@ -7600,7 +7773,7 @@ public class AmbariManagementControllerTest {
       cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), cluster.getDesiredStackVersion().getStackId()));
       cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
     }
-    
+
     Set<HostRequest> requests = new HashSet<HostRequest>();
     // delete from cluster
     requests.clear();
@@ -7610,7 +7783,7 @@ public class AmbariManagementControllerTest {
       fail("Expect failure deleting hosts when components exist.");
     } catch (Exception e) {
     }
-    
+
     Set<ServiceComponentHostRequest> schRequests = new HashSet<ServiceComponentHostRequest>();
     // maintenance HC for non-clients
     schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, "MAINTENANCE"));
@@ -7623,9 +7796,9 @@ public class AmbariManagementControllerTest {
     schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName2, host1, null));
     schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName3, host1, null));
     controller.deleteHostComponents(schRequests);
-    
+
     Assert.assertEquals(0, cluster.getServiceComponentHosts(host1).size());
-    
+
     // delete, which should fail since it is part of a cluster
     requests.clear();
     requests.add(new HostRequest(host1, null, null));
@@ -7634,7 +7807,7 @@ public class AmbariManagementControllerTest {
       fail("Expect failure when removing from host when it is part of a cluster.");
     } catch (Exception e) {
     }
-    
+
     // delete host from cluster
     requests.clear();
     requests.add(new HostRequest(host1, clusterName, null));
@@ -7656,12 +7829,12 @@ public class AmbariManagementControllerTest {
     } catch (HostNotFoundException e) {
       // expected
     }
-    
+
     // remove host2
     requests.clear();
     requests.add(new HostRequest(host2, null, null));
     HostResourceProviderTest.deleteHosts(controller, requests);
-    
+
     // verify host does not exist
     try {
       clusters.getHost(host2);
@@ -7681,7 +7854,7 @@ public class AmbariManagementControllerTest {
     }
 
   }
-  
+
   @Test
   public void testGetRootServices() throws Exception {
 
@@ -7703,7 +7876,7 @@ public class AmbariManagementControllerTest {
       // do nothing
     }
   }
-  
+
   @Test
   public void testGetRootServiceComponents() throws Exception {
 
@@ -7714,7 +7887,7 @@ public class AmbariManagementControllerTest {
     RootServiceComponentRequest requestWithParams = new RootServiceComponentRequest(
         RootServiceResponseFactory.Services.AMBARI.toString(),
         RootServiceResponseFactory.Services.AMBARI.getComponents()[0].toString());
-    
+
     Set<RootServiceComponentResponse> responsesWithParams = controller.getRootServiceComponents(Collections.singleton(requestWithParams));
     Assert.assertEquals(1, responsesWithParams.size());
     for (RootServiceComponentResponse responseWithParams: responsesWithParams) {
@@ -7728,16 +7901,16 @@ public class AmbariManagementControllerTest {
       // do nothing
     }
   }
-  
+
   @Test
   public void testDeleteComponentsOnHost() throws Exception {
     String clusterName = "foo1";
-    
+
     createCluster(clusterName);
-    
+
     Cluster cluster = clusters.getCluster(clusterName);
     cluster.setDesiredStackVersion(new StackId("HDP-0.1"));
-    
+
     String serviceName = "HDFS";
     createService(clusterName, serviceName, null);
     String componentName1 = "NAMENODE";
@@ -7752,16 +7925,16 @@ public class AmbariManagementControllerTest {
     clusters.addHost(host1);
     clusters.getHost("h1").setOsType("centos5");
     clusters.getHost("h1").persist();
-    
+
     clusters.mapHostToCluster(host1, clusterName);
 
     createServiceComponentHost(clusterName, null, componentName1, host1, null);
     createServiceComponentHost(clusterName, serviceName, componentName2, host1, null);
     createServiceComponentHost(clusterName, serviceName, componentName3, host1, null);
-    
+
     // Install
     installService(clusterName, serviceName, false, false);
-    
+
     // make them believe they are up
     Map<String, ServiceComponentHost> hostComponents = cluster.getService(serviceName).getServiceComponent(componentName1).getServiceComponentHosts();
     for (Map.Entry<String, ServiceComponentHost> entry : hostComponents.entrySet()) {
@@ -7775,14 +7948,14 @@ public class AmbariManagementControllerTest {
       cHost.handleEvent(new ServiceComponentHostInstallEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis(), cluster.getDesiredStackVersion().getStackId()));
       cHost.handleEvent(new ServiceComponentHostOpSucceededEvent(cHost.getServiceComponentName(), cHost.getHostName(), System.currentTimeMillis()));
     }
-    
-    
+
+
     ServiceComponentHost sch = cluster.getService(serviceName).getServiceComponent(componentName2).getServiceComponentHost(host1);
     Assert.assertNotNull(sch);
-    
+
     sch.handleEvent(new ServiceComponentHostStartEvent(sch.getServiceComponentName(), sch.getHostName(), System.currentTimeMillis()));
     sch.handleEvent(new ServiceComponentHostStartedEvent (sch.getServiceComponentName(), sch.getHostName(), System.currentTimeMillis()));
-    
+
     Set<ServiceComponentHostRequest> schRequests = new HashSet<ServiceComponentHostRequest>();
     schRequests.add(new ServiceComponentHostRequest(clusterName, null, null, host1, null));
     try {
@@ -7794,7 +7967,7 @@ public class AmbariManagementControllerTest {
 
     sch.handleEvent(new ServiceComponentHostStopEvent(sch.getServiceComponentName(), sch.getHostName(), System.currentTimeMillis()));
     sch.handleEvent(new ServiceComponentHostStoppedEvent (sch.getServiceComponentName(), sch.getHostName(), System.currentTimeMillis()));
-    
+
     schRequests.clear();
     // maintenance HC, DN was already stopped
     schRequests.add(new ServiceComponentHostRequest(clusterName, serviceName, componentName1, host1, "MAINTENANCE"));
@@ -7804,7 +7977,7 @@ public class AmbariManagementControllerTest {
     schRequests.clear();
     schRequests.add(new ServiceComponentHostRequest(clusterName, null, null, host1, null));
     controller.deleteHostComponents(schRequests);
-    
+
     Assert.assertEquals(0, cluster.getServiceComponentHosts(host1).size());
   }
 
@@ -8157,21 +8330,21 @@ public class AmbariManagementControllerTest {
 
       amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
 
-      org.junit.Assert.assertEquals(State.MAINTENANCE, componentHost.getState());
+      Assert.assertEquals(State.MAINTENANCE, componentHost.getState());
 
       componentHostRequests.clear();
       componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", "INSTALLED"));
 
       amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
 
-      org.junit.Assert.assertEquals(State.INSTALLED, componentHost.getState());
+      Assert.assertEquals(State.INSTALLED, componentHost.getState());
 
       componentHostRequests.clear();
       componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host1", "MAINTENANCE"));
 
       amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
 
-      org.junit.Assert.assertEquals(State.MAINTENANCE, componentHost.getState());
+      Assert.assertEquals(State.MAINTENANCE, componentHost.getState());
 
       componentHostRequests.clear();
       componentHostRequests.add(new ServiceComponentHostRequest("c1", null, "NAMENODE", "host2", null));
@@ -8184,7 +8357,7 @@ public class AmbariManagementControllerTest {
       amc.updateHostComponents(componentHostRequests, mapRequestProps, true);
 
       namenodes = cluster.getService("HDFS").getServiceComponent("NAMENODE").getServiceComponentHosts();
-      org.junit.Assert.assertEquals(2, namenodes.size());
+      Assert.assertEquals(2, namenodes.size());
 
       componentHost = namenodes.get("host2");
       componentHost.handleEvent(new ServiceComponentHostInstallEvent(componentHost.getServiceComponentName(), componentHost.getHostName(), System.currentTimeMillis(), "HDP-1.2.0"));

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
index fcaca01..bd31b7e 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostComponentResourceProviderTest.java
@@ -126,13 +126,13 @@ public class HostComponentResourceProviderTest {
     StackId stackId2 = new StackId("HDP-0.2");
     allResponse.add(new ServiceComponentHostResponse(
         "Cluster100", "Service100", "Component100", "Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
-        stackId2.getStackId()));
+        stackId2.getStackId(), null));
     allResponse.add(new ServiceComponentHostResponse(
         "Cluster100", "Service100", "Component101", "Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
-        stackId2.getStackId()));
+        stackId2.getStackId(), null));
     allResponse.add(new ServiceComponentHostResponse(
         "Cluster100", "Service100", "Component102", "Host100", State.INSTALLED.toString(), stackId.getStackId(), State.STARTED.toString(),
-        stackId2.getStackId()));
+        stackId2.getStackId(), null));
     Map<String, String> expectedNameValues = new HashMap<String, String>();
     expectedNameValues.put(
         HostComponentResourceProvider.HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, "Cluster100");
@@ -242,7 +242,7 @@ public class HostComponentResourceProviderTest {
 
     Set<ServiceComponentHostResponse> nameResponse = new HashSet<ServiceComponentHostResponse>();
     nameResponse.add(new ServiceComponentHostResponse(
-        "Cluster102", "Service100", "Component100", "Host100", "STARTED", "", "", ""));
+        "Cluster102", "Service100", "Component100", "Host100", "STARTED", "", "", "", null));
     
     HostComponentResourceProvider provider = 
         new HostComponentResourceProvider(PropertyHelper.getPropertyIds(type),

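The extra trailing null threaded through these ServiceComponentHostResponse constructors corresponds to the new admin-state field added by this patch. A hedged sketch of the assumed call shape (the per-argument comments are inferred from the test data, not taken from the class itself):

  // Assumed nine-argument form; the final argument carries the component's
  // admin state (e.g. a HostComponentAdminState name) and is null when unset.
  ServiceComponentHostResponse r = new ServiceComponentHostResponse(
      "Cluster100",                  // cluster name
      "Service100",                  // service name
      "Component100",                // component name
      "Host100",                     // host name
      State.INSTALLED.toString(),    // live state
      stackId.getStackId(),          // stack version
      State.STARTED.toString(),      // desired state
      stackId2.getStackId(),         // desired stack version
      null);                         // admin state (new in this patch)
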
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
index d7e5ba7..9bf0c94 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/HostResourceProviderTest.java
@@ -242,9 +242,12 @@ public class HostResourceProviderTest {
     Set<Cluster> clusterSet = new HashSet<Cluster>();
     clusterSet.add(cluster);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100",
+        "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102",
+        "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103",
+        "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -349,9 +352,12 @@ public class HostResourceProviderTest {
     Set<Cluster> clusterSet = new HashSet<Cluster>();
     clusterSet.add(cluster);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100",
+        "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102",
+        "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103",
+        "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -454,9 +460,12 @@ public class HostResourceProviderTest {
     Set<Cluster> clusterSet = new HashSet<Cluster>();
     clusterSet.add(cluster);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102", "Host100", "INSTALLED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100",
+        "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102",
+        "Host100", "INSTALLED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103",
+        "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -642,9 +651,12 @@ public class HostResourceProviderTest {
     Set<Cluster> clusterSet = new HashSet<Cluster>();
     clusterSet.add(cluster);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102", "Host100", "INSTALLED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component100",
+        "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component102",
+        "Host100", "INSTALLED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("Cluster100", "Service100", "Component103",
+        "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new HashSet<ServiceComponentHostResponse>();
     responses.add(shr1);

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
index 25cf6bf..5082dce 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/controller/internal/ServiceResourceProviderTest.java
@@ -490,9 +490,10 @@ public class ServiceResourceProviderTest {
     StackId stackId = createNiceMock(StackId.class);
     ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100",
+        "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -538,9 +539,9 @@ public class ServiceResourceProviderTest {
     StackId stackId = createNiceMock(StackId.class);
     ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "UNKNOWN", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "UNKNOWN", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -584,9 +585,9 @@ public class ServiceResourceProviderTest {
     StackId stackId = createNiceMock(StackId.class);
     ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "STARTING", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "STARTING", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -630,9 +631,9 @@ public class ServiceResourceProviderTest {
     StackId stackId = createNiceMock(StackId.class);
     ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "INSTALLED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "INSTALLED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -676,9 +677,9 @@ public class ServiceResourceProviderTest {
     StackId stackId = createNiceMock(StackId.class);
     ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "MAINTENANCE", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "JOBTRACKER", "Host100", "MAINTENANCE", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "MAPREDUCE_CLIENT", "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "MAPREDUCE", "TASKTRACKER", "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -724,9 +725,9 @@ public class ServiceResourceProviderTest {
     StackId stackId = createNiceMock(StackId.class);
     ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "HDFS", "NAMENODE", "Host100",  "STARTED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "HDFS", "SECONDARY_NAMENODE", "Host100", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "HDFS", "JOURNALNODE", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "HDFS", "NAMENODE", "Host100",  "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "HDFS", "SECONDARY_NAMENODE", "Host100", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "HDFS", "JOURNALNODE", "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -772,9 +773,9 @@ public class ServiceResourceProviderTest {
     StackId stackId = createNiceMock(StackId.class);
     ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "HDFS", "NAMENODE", "Host100", "INSTALLED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "HDFS", "NAMENODE", "Host101", "STARTED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "HDFS", "JOURNALNODE", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "HDFS", "NAMENODE", "Host100", "INSTALLED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "HDFS", "NAMENODE", "Host101", "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "HDFS", "JOURNALNODE", "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
     responses.add(shr1);
@@ -820,9 +821,9 @@ public class ServiceResourceProviderTest {
     StackId stackId = createNiceMock(StackId.class);
     ComponentInfo componentInfo = createNiceMock(ComponentInfo.class);
 
-    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "HDFS", "HBASE_MASTER", "Host100",  "STARTED", "", null, null);
-    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "HDFS", "HBASE_MASTER", "Host101", "INSTALLED", "", null, null);
-    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "HDFS", "HBASE_REGIONSERVER", "Host100", "STARTED", "", null, null);
+    ServiceComponentHostResponse shr1 = new ServiceComponentHostResponse("C1", "HDFS", "HBASE_MASTER", "Host100",  "STARTED", "", null, null, null);
+    ServiceComponentHostResponse shr2 = new ServiceComponentHostResponse("C1", "HDFS", "HBASE_MASTER", "Host101", "INSTALLED", "", null, null, null);
+    ServiceComponentHostResponse shr3 = new ServiceComponentHostResponse("C1", "HDFS", "HBASE_REGIONSERVER", "Host100", "STARTED", "", null, null, null);
 
     Set<ServiceComponentHostResponse> responses = new LinkedHashSet<ServiceComponentHostResponse>();
     responses.add(shr1);

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
index 32567f5..c96fb8c 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/ServiceComponentTest.java
@@ -188,7 +188,7 @@ public class ServiceComponentTest {
     Assert.assertTrue(sc.getServiceComponentHosts().isEmpty());
 
     try {
-      serviceComponentHostFactory.createNew(sc, "h1", false);
+      serviceComponentHostFactory.createNew(sc, "h1");
       fail("Expected error for invalid host");
     } catch (Exception e) {
       // Expected
@@ -199,11 +199,11 @@ public class ServiceComponentTest {
     addHostToCluster("h3", service.getCluster().getClusterName());
 
     ServiceComponentHost sch1 =
-        serviceComponentHostFactory.createNew(sc, "h1", false);
+        serviceComponentHostFactory.createNew(sc, "h1");
     ServiceComponentHost sch2 =
-        serviceComponentHostFactory.createNew(sc, "h2", false);
+        serviceComponentHostFactory.createNew(sc, "h2");
     ServiceComponentHost failSch =
-        serviceComponentHostFactory.createNew(sc, "h2", false);
+        serviceComponentHostFactory.createNew(sc, "h2");
 
     Map<String, ServiceComponentHost> compHosts =
         new HashMap<String, ServiceComponentHost>();
@@ -232,7 +232,7 @@ public class ServiceComponentTest {
     Assert.assertEquals("h2", schCheck.getHostName());
 
     ServiceComponentHost sch3 =
-        serviceComponentHostFactory.createNew(sc, "h3", false);
+        serviceComponentHostFactory.createNew(sc, "h3");
     sc.addServiceComponentHost(sch3);
     sch3.persist();
     Assert.assertNotNull(sc.getServiceComponentHost("h3"));
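
Throughout these tests, serviceComponentHostFactory.createNew(sc, host, isClient) drops its boolean flag, suggesting that whether a host component is a client is now derived from the ServiceComponent itself. A minimal usage sketch under that assumption:

  // Assumed two-argument factory: client-ness is inferred from the
  // ServiceComponent rather than passed as an explicit flag.
  ServiceComponentHost sch = serviceComponentHostFactory.createNew(sc, "h1");
  sc.addServiceComponentHost(sch);
  sch.persist();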

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
index 7e3a7db..1124b0f 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/cluster/ClusterTest.java
@@ -266,7 +266,7 @@ public class ClusterTest {
     s.addServiceComponent(sc);
     sc.persist();
     ServiceComponentHost sch =
-        serviceComponentHostFactory.createNew(sc, "h1", false);
+        serviceComponentHostFactory.createNew(sc, "h1");
     sc.addServiceComponentHost(sch);
     sch.persist();
 
@@ -285,7 +285,7 @@ public class ClusterTest {
         ServiceComponent sc1 = serviceComponentFactory.createNew(s1, "PIG");
         s1.addServiceComponent(sc1);
         sc1.persist();
-        ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1", false);
+        ServiceComponentHost sch1 = serviceComponentHostFactory.createNew(sc1, "h1");
         sc1.addServiceComponentHost(sch1);
         sch1.persist();
       }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
index e19250e..da3e3e1 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostTest.java
@@ -109,13 +109,13 @@ public class ServiceComponentHostTest {
       String hostName, boolean isClient) throws AmbariException{
     Cluster c = clusters.getCluster("C1");
     
-    return createNewServiceComponentHost(c, svc, svcComponent, hostName, isClient);
+    return createNewServiceComponentHost(c, svc, svcComponent, hostName);
   }
   private ServiceComponentHost createNewServiceComponentHost(
       Cluster c,
       String svc,
       String svcComponent,
-      String hostName, boolean isClient) throws AmbariException{
+      String hostName) throws AmbariException{
 
     Service s = null;
 
@@ -139,7 +139,7 @@ public class ServiceComponentHostTest {
     }
 
     ServiceComponentHost impl = serviceComponentHostFactory.createNew(
-        sc, hostName, isClient);
+        sc, hostName);
     impl.persist();
     Assert.assertEquals(State.INIT,
         impl.getState());
@@ -632,9 +632,9 @@ public class ServiceComponentHostTest {
     
     Cluster cluster = clusters.getCluster(clusterName);
     
-    ServiceComponentHost sch1 = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", hostName, false);
-    ServiceComponentHost sch2 = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName, false);
-    ServiceComponentHost sch3 = createNewServiceComponentHost(cluster, "MAPREDUCE2", "HISTORYSERVER", hostName, false);
+    ServiceComponentHost sch1 = createNewServiceComponentHost(cluster, "HDFS", "NAMENODE", hostName);
+    ServiceComponentHost sch2 = createNewServiceComponentHost(cluster, "HDFS", "DATANODE", hostName);
+    ServiceComponentHost sch3 = createNewServiceComponentHost(cluster, "MAPREDUCE2", "HISTORYSERVER", hostName);
     
     sch1.setDesiredState(State.INSTALLED);
     sch1.setState(State.INSTALLING);

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java b/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
index 5452681..4ac45e3 100644
--- a/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
+++ b/ambari-server/src/test/java/org/apache/ambari/server/utils/TestStageUtils.java
@@ -48,6 +48,7 @@ import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.ServiceComponentHostFactory;
 import org.apache.ambari.server.state.StackId;
 import org.apache.commons.logging.Log;
@@ -103,7 +104,7 @@ public class TestStageUtils {
         .getServiceComponent(componentName)
         .addServiceComponentHost(
             serviceComponentHostFactory.createNew(cl.getService(serviceName)
-                .getServiceComponent(componentName), hostList.get(hostIndex), false));
+                .getServiceComponent(componentName), hostList.get(hostIndex)));
       }
     }
   }
@@ -215,12 +216,15 @@ public class TestStageUtils {
     Map<String, List<Integer>> nonameTopology = new HashMap<String, List<Integer>>(); 
     nonameTopology.put("NONAME_SERVER", Collections.singletonList(7));
     addService(fsm.getCluster("c1"), hostList, nonameTopology , "NONAME", injector);
-    
-    
+
+    fsm.getCluster("c1").getService("MAPREDUCE").getServiceComponent("TASKTRACKER").getServiceComponentHost("h2")
+        .setComponentAdminState(HostComponentAdminState.DECOMMISSIONED);
+    fsm.getCluster("c1").getService("MAPREDUCE").getServiceComponent("TASKTRACKER").getServiceComponentHost("h3")
+        .setComponentAdminState(HostComponentAdminState.DECOMMISSIONED);
+
     //Get cluster host info
-    Map<String, Set<String>> info = StageUtils.getClusterHostInfo(fsm.getHostsForCluster("c1"),
-        fsm.getCluster("c1"), new HostsMap(injector.getInstance(Configuration.class)),
-        injector.getInstance(Configuration.class));
+    Map<String, Set<String>> info =
+        StageUtils.getClusterHostInfo(fsm.getHostsForCluster("c1"), fsm.getCluster("c1"));
 
     //All hosts present in cluster host info
     assertEquals(fsm.getHosts().size(), info.get(HOSTS_LIST).size());
@@ -257,8 +261,7 @@ public class TestStageUtils {
       assertEquals(new HashSet<Integer>(pingPorts).size(), actualPingPorts.size());
     
     List<Integer> pingPortsActual = getRangeMappedDecompressedSet(actualPingPorts);
-    
-    
+
     List<Integer> reindexedPorts = getReindexedList(pingPortsActual, new ArrayList<String>(info.get(HOSTS_LIST)), hostList);
     
     //Treat null values
@@ -271,7 +274,9 @@ public class TestStageUtils {
     
     // check for no-name in the list
     assertTrue(info.containsKey("noname_server_hosts"));
-
+    assertTrue(info.containsKey("decom_tt_hosts"));
+    Set<String> decommissionedHosts = info.get("decom_tt_hosts");
+    assertEquals(2, decommissionedHosts.toString().split(",").length);
   }
 
   private void checkServiceCompression(Map<String, Set<String>> info,

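The TestStageUtils changes above exercise the new HostComponentAdminState end to end: TaskTrackers on h2 and h3 are marked DECOMMISSIONED, and the regenerated cluster host info is expected to expose them under the decom_tt_hosts key. A condensed sketch of that flow, mirroring the test (and assuming the two-argument getClusterHostInfo shown in the diff):

  // Mark a slave component decommissioned, then rebuild cluster host info;
  // decommissioned TaskTrackers surface under "decom_tt_hosts" as indices
  // into the all_hosts list.
  Cluster c1 = fsm.getCluster("c1");
  c1.getService("MAPREDUCE").getServiceComponent("TASKTRACKER")
      .getServiceComponentHost("h2")
      .setComponentAdminState(HostComponentAdminState.DECOMMISSIONED);
  Map<String, Set<String>> info =
      StageUtils.getClusterHostInfo(fsm.getHostsForCluster("c1"), c1);
  assertTrue(info.containsKey("decom_tt_hosts"));
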
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/core-site.xml b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000..e244fc7
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,167 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>mapreduce.jobtracker.webinterface.trusted</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value>simple</value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value>false</value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+        DEFAULT
+    </value>
+<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
+  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.ORG)s/@.*//
+RULE:[2:$1@$0](.*@ACME.ORG)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+
+</configuration>
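
To make the rule syntax above concrete, a few sample mappings, assuming EXAMPLE.COM is the cluster's default realm (the principals are invented for illustration):

  nn/host1.example.com@EXAMPLE.COM  -> hdfs    (via RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/)
  jhs/host1.example.com@EXAMPLE.COM -> mapred  (via RULE:[2:$1@$0](jhs@.*)s/.*/mapred/)
  alice@EXAMPLE.COM                 -> alice   (via DEFAULT)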


[3/7] AMBARI-4270. Add decommission support for TaskTracker and modify support for DataNode to match

Posted by sm...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metrics.json b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metrics.json
new file mode 100644
index 0000000..f33a0c0
--- /dev/null
+++ b/ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/metrics.json
@@ -0,0 +1,7800 @@
+{
+  "NAMENODE": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "dfs.FSNamesystem.TotalLoad",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "dfs.FSNamesystem.BlockCapacity",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "dfs.namenode.GetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesAppended": {
+            "metric": "dfs.namenode.FilesAppended",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "dfs.FSNamesystem.CapacityTotalGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "dfs.FSNamesystem.CapacityUsedGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "dfs.namenode.AddBlockOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesDeleted": {
+            "metric": "dfs.namenode.FilesDeleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "dfs.namenode.SyncsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "dfs.namenode.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "dfs.namenode.FilesCreated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesRenamed": {
+            "metric": "dfs.namenode.FilesRenamed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "dfs.namenode.GetBlockLocations",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "dfs.namenode.FileInfoOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/DeleteFileOps": {
+            "metric": "dfs.namenode.DeleteFileOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "dfs.namenode.FilesInGetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "dfs.namenode.SyncsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "dfs.FSNamesystem.FilesTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "dfs.FSNamesystem.ExcessBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "dfs.namenode.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/SafemodeTime": {
+            "metric": "dfs.namenode.SafemodeTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "dfs.FSNamesystem.BlocksTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "dfs.namenode.TransactionsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "dfs.namenode.TransactionsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "dfs.FSNamesystem.MissingBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "dfs.FSNamesystem.CorruptBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/fsImageLoadTime": {
+            "metric": "dfs.namenode.FsImageLoadTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "dfs.namenode.CreateFileOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/dfs/namenode/Used": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.TotalLoad",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM":{
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlockCapacity",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/HostName": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/Safemode": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CorruptBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/LiveNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotalGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsedGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/DecomNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonDfsUsedSpace": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Safemode": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/DecomNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsed": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonHeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/DeadNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Free": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Total": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/HeapMemoryMax": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingReplicationBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/UnderReplicatedBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/MissingBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingDeletionBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/BlocksTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/LiveNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/DeadNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/HeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.FilesTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Version": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ExcessBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonHeapMemoryMax": {
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ScheduledReplicationBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityTotal": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemainingGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Threads": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/NameDirStatuses": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logError": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/Version": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/NonDfsUsedSpace": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalBlocks": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityNonDFSUsed",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "dfs.FSNamesystem.TotalLoad",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "dfs.FSNamesystem.BlockCapacity",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "dfs.namenode.GetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesAppended": {
+            "metric": "dfs.namenode.FilesAppended",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "dfs.FSNamesystem.CapacityTotalGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "dfs.FSNamesystem.CapacityUsedGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "dfs.namenode.AddBlockOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesDeleted": {
+            "metric": "dfs.namenode.FilesDeleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "dfs.namenode.SyncsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "dfs.namenode.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "dfs.namenode.FilesCreated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesRenamed": {
+            "metric": "dfs.namenode.FilesRenamed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "dfs.namenode.GetBlockLocations",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "dfs.namenode.FileInfoOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/DeleteFileOps": {
+            "metric": "dfs.namenode.DeleteFileOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "dfs.namenode.FilesInGetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "dfs.namenode.SyncsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "dfs.FSNamesystem.FilesTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "dfs.FSNamesystem.ExcessBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "dfs.namenode.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/SafemodeTime": {
+            "metric": "dfs.namenode.SafemodeTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "dfs.FSNamesystem.BlocksTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "dfs.namenode.TransactionsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "dfs.namenode.TransactionsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "dfs.FSNamesystem.MissingBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "dfs.FSNamesystem.CorruptBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/fsImageLoadTime": {
+            "metric": "dfs.namenode.FsImageLoadTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "dfs.namenode.CreateFileOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/dfs/namenode/Used": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/HostName": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/runtime/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.SyncsAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.BlockReportAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryMax": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "m

<TRUNCATED>
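
The definitions above all share one shape: a property id maps to a source
metric name plus two flags, "pointInTime" and "temporal", that declare which
query styles the metric supports. Below is a minimal Python sketch of how such
a definition could be consulted; the resolve() helper and the inline JSON are
illustrative assumptions, not Ambari's actual property-provider API:

import json

# Hypothetical helper: return the source metric name only if the entry
# supports the requested query style (temporal vs. point-in-time).
def resolve(defn, prop_id, temporal):
    entry = defn.get(prop_id)
    if entry is None:
        return None
    flag = "temporal" if temporal else "pointInTime"
    return entry["metric"] if entry.get(flag) else None

defn = json.loads('''{
  "metrics/cpu/cpu_idle":  {"metric": "cpu_idle", "pointInTime": true, "temporal": true},
  "metrics/rpc/SentBytes": {"metric": "rpc.rpc.SentBytes", "pointInTime": false, "temporal": true}
}''')

print(resolve(defn, "metrics/cpu/cpu_idle", temporal=False))   # cpu_idle
print(resolve(defn, "metrics/rpc/SentBytes", temporal=False))  # None (temporal only)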

[7/7] git commit: AMBARI-4270. Add decommission support for TaskTracker and modify support for DataNode to match

Posted by sm...@apache.org.
AMBARI-4270. Add decommission support for TaskTracker and modify support for DataNode to match


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/5d3677f7
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/5d3677f7
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/5d3677f7

Branch: refs/heads/trunk
Commit: 5d3677f7485f4d1016a9caaa105e6be42813bce8
Parents: c869765
Author: Sumit Mohanty <sm...@hortonworks.com>
Authored: Mon Jan 13 14:08:35 2014 -0800
Committer: Sumit Mohanty <sm...@hortonworks.com>
Committed: Mon Jan 13 14:08:35 2014 -0800

----------------------------------------------------------------------
 .../libraries/script/script.py                  |    4 +-
 .../java/org/apache/ambari/server/Role.java     |    1 -
 .../ambari/server/actionmanager/Stage.java      |   14 +-
 .../controller/AmbariActionExecutionHelper.java |    2 +-
 .../AmbariCustomCommandExecutionHelper.java     |  688 +-
 .../AmbariCustomCommandExecutionHelperImpl.java |  593 --
 .../AmbariManagementControllerImpl.java         |   49 +-
 .../server/controller/ControllerModule.java     |    2 +-
 .../controller/ServiceComponentHostRequest.java |   18 +
 .../ServiceComponentHostResponse.java           |   63 +-
 .../internal/HostComponentResourceProvider.java |   96 +-
 .../ambari/server/metadata/ActionMetadata.java  |   13 +-
 .../HostComponentDesiredStateEntity.java        |   16 +-
 .../server/state/HostComponentAdminState.java   |   34 +
 .../ambari/server/state/ServiceComponent.java   |    2 +
 .../server/state/ServiceComponentHost.java      |    4 +
 .../state/ServiceComponentHostFactory.java      |    3 +-
 .../server/state/ServiceComponentImpl.java      |   98 +-
 .../svccomphost/ServiceComponentHostImpl.java   |   72 +-
 .../apache/ambari/server/utils/StageUtils.java  |  205 +-
 .../main/resources/Ambari-DDL-MySQL-CREATE.sql  |    2 +-
 .../main/resources/Ambari-DDL-Oracle-CREATE.sql |    2 +-
 .../resources/Ambari-DDL-Postgres-CREATE.sql    |    2 +-
 .../src/main/resources/properties.json          |    1 +
 .../1.3.4/hooks/before-START/scripts/params.py  |   10 -
 .../scripts/shared_initialization.py            |   11 -
 .../HDP/1.3.4/services/HBASE/metainfo.xml       |   10 +
 .../stacks/HDP/1.3.4/services/HDFS/metainfo.xml |   10 +
 .../HDFS/package/scripts/hdfs_namenode.py       |   20 +
 .../services/HDFS/package/scripts/namenode.py   |    7 +
 .../services/HDFS/package/scripts/params.py     |    4 +
 .../package/templates/exclude_hosts_list.j2     |    3 +
 .../HDP/1.3.4/services/MAPREDUCE/metainfo.xml   |   10 +
 .../MAPREDUCE/package/scripts/jobtracker.py     |   22 +
 .../MAPREDUCE/package/scripts/params.py         |    6 +-
 .../package/templates/exclude_hosts_list.j2     |    3 +
 .../2.0.8/hooks/before-START/scripts/params.py  |   10 -
 .../scripts/shared_initialization.py            |   11 -
 .../HDP/2.0.8/services/HBASE/metainfo.xml       |   20 +-
 .../stacks/HDP/2.0.8/services/HDFS/metainfo.xml |   10 +
 .../HDFS/package/scripts/hdfs_namenode.py       |   27 +
 .../services/HDFS/package/scripts/namenode.py   |    7 +
 .../services/HDFS/package/scripts/params.py     |    4 +
 .../package/templates/exclude_hosts_list.j2     |    3 +
 .../stacks/HDP/2.0.8/services/YARN/metainfo.xml |   10 +
 .../upgrade/ddl/Ambari-DDL-MySQL-UPGRADE.sql    |    7 +-
 .../upgrade/ddl/Ambari-DDL-Oracle-UPGRADE.sql   |    3 +
 .../ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql   |    3 +
 .../api/util/StackExtensionHelperTest.java      |   36 +-
 .../AmbariManagementControllerTest.java         |  491 +-
 .../HostComponentResourceProviderTest.java      |    8 +-
 .../internal/HostResourceProviderTest.java      |   36 +-
 .../internal/ServiceResourceProviderTest.java   |   49 +-
 .../server/state/ServiceComponentTest.java      |   10 +-
 .../server/state/cluster/ClusterTest.java       |    4 +-
 .../svccomphost/ServiceComponentHostTest.java   |   12 +-
 .../ambari/server/utils/TestStageUtils.java     |   23 +-
 .../services/HDFS/configuration/core-site.xml   |  167 +
 .../services/HDFS/configuration/global.xml      |  192 +
 .../HDFS/configuration/hadoop-policy.xml        |  134 +
 .../services/HDFS/configuration/hdfs-site.xml   |  513 ++
 .../stacks/HDP/2.0.7/services/HDFS/metainfo.xml |  152 +
 .../stacks/HDP/2.0.7/services/HDFS/metrics.json | 7800 ++++++++++++++++++
 .../HDFS/package/files/checkForFormat.sh        |   62 +
 .../services/HDFS/package/files/checkWebUI.py   |   53 +
 .../services/HDFS/package/scripts/datanode.py   |   57 +
 .../HDFS/package/scripts/hdfs_client.py         |   49 +
 .../HDFS/package/scripts/hdfs_datanode.py       |   56 +
 .../HDFS/package/scripts/hdfs_namenode.py       |  180 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |   53 +
 .../HDFS/package/scripts/journalnode.py         |   74 +
 .../services/HDFS/package/scripts/namenode.py   |   61 +
 .../services/HDFS/package/scripts/params.py     |  180 +
 .../HDFS/package/scripts/service_check.py       |  107 +
 .../services/HDFS/package/scripts/snamenode.py  |   64 +
 .../HDFS/package/scripts/status_params.py       |   31 +
 .../services/HDFS/package/scripts/utils.py      |  138 +
 .../services/HDFS/package/scripts/zkfc_slave.py |   62 +
 .../services/HIVE/configuration/hive-site.xml   |  267 +
 .../stacks/HDP/2.0.7/services/HIVE/metainfo.xml |   47 +
 .../services/HIVE/package/files/addMysqlUser.sh |   41 +
 .../services/HIVE/package/files/hcatSmoke.sh    |   35 +
 .../services/HIVE/package/files/hiveSmoke.sh    |   23 +
 .../services/HIVE/package/files/hiveserver2.sql |   23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |   31 +
 .../services/HIVE/package/files/pigSmoke.sh     |   18 +
 .../HIVE/package/files/startHiveserver2.sh      |   22 +
 .../HIVE/package/files/startMetastore.sh        |   22 +
 .../services/HIVE/package/scripts/__init__.py   |   19 +
 .../2.0.7/services/HIVE/package/scripts/hcat.py |   47 +
 .../HIVE/package/scripts/hcat_client.py         |   43 +
 .../HIVE/package/scripts/hcat_service_check.py  |   63 +
 .../2.0.7/services/HIVE/package/scripts/hive.py |  122 +
 .../HIVE/package/scripts/hive_client.py         |   41 +
 .../HIVE/package/scripts/hive_metastore.py      |   63 +
 .../HIVE/package/scripts/hive_server.py         |   63 +
 .../HIVE/package/scripts/hive_service.py        |   56 +
 .../HIVE/package/scripts/mysql_server.py        |   77 +
 .../HIVE/package/scripts/mysql_service.py       |   44 +
 .../services/HIVE/package/scripts/params.py     |  123 +
 .../HIVE/package/scripts/service_check.py       |   56 +
 .../HIVE/package/scripts/status_params.py       |   30 +
 .../HIVE/package/templates/hcat-env.sh.j2       |   25 +
 .../HIVE/package/templates/hive-env.sh.j2       |   55 +
 .../stacks/HDP/2.0.7/services/YARN/metainfo.xml |  178 +-
 105 files changed, 13482 insertions(+), 1221 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-agent/src/main/python/resource_management/libraries/script/script.py
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/python/resource_management/libraries/script/script.py b/ambari-agent/src/main/python/resource_management/libraries/script/script.py
index b8c9d83..702aafe 100644
--- a/ambari-agent/src/main/python/resource_management/libraries/script/script.py
+++ b/ambari-agent/src/main/python/resource_management/libraries/script/script.py
@@ -98,7 +98,7 @@ class Script(object):
       # Non-zero exit code is interpreted as an INSTALLED status of a component
       sys.exit(1)
     except Fail:
-      logger.exception("Got exception while executing command {0}:".format(command_name))
+      logger.exception("Error while executing command '{0}':".format(command_name))
       sys.exit(1)
 
 
@@ -108,7 +108,7 @@ class Script(object):
     """
     self_methods = dir(self)
     if not command_name in self_methods:
-      raise Fail("Script {0} has not method '{1}'".format(sys.argv[0], command_name))
+      raise Fail("Script '{0}' has no method '{1}'".format(sys.argv[0], command_name))
     method = getattr(self, command_name)
     return method
 

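The get_method change above captures the agent's dispatch pattern: a command
name arriving from the server is resolved to a method on the script object,
with a readable failure when no such method exists. A self-contained sketch of
that pattern, assuming nothing from resource_management (MiniScript and its
names are illustrative only):

import sys

class MiniScript(object):
    def get_method(self, command_name):
        # Same check as above: refuse unknown commands with a clear message.
        if command_name not in dir(self):
            raise RuntimeError("Script '{0}' has no method '{1}'"
                               .format(sys.argv[0], command_name))
        return getattr(self, command_name)

    def start(self):
        print("started")

MiniScript().get_method("start")()  # prints: started
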
http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/Role.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/Role.java b/ambari-server/src/main/java/org/apache/ambari/server/Role.java
index f579062..9eff713 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/Role.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/Role.java
@@ -53,7 +53,6 @@ public class Role {
   
   public static final Role AMBARI_SERVER_ACTION = valueOf("AMBARI_SERVER_ACTION"); 
   public static final Role DATANODE = valueOf("DATANODE");
-  public static final Role DECOMMISSION_DATANODE = valueOf("DECOMMISSION_DATANODE");
   public static final Role FLUME_SERVER = valueOf("FLUME_SERVER");
   public static final Role GANGLIA_MONITOR = valueOf("GANGLIA_MONITOR");
   public static final Role GANGLIA_SERVER = valueOf("GANGLIA_SERVER");

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
index 3a9cc5c..c0272ec 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/actionmanager/Stage.java
@@ -50,11 +50,7 @@ public class Stage {
   private long stageId = -1;
   private final String logDir;
   private final String requestContext;
-  private final String clusterHostInfo;
-
-  public String getClusterHostInfo() {
-    return clusterHostInfo;
-  }
+  private String clusterHostInfo;
 
   private int taskTimeout = -1;
   private int perTaskTimeFactor = 60000;
@@ -147,6 +143,14 @@ public class Stage {
     return commands;
   }
 
+  public String getClusterHostInfo() {
+    return clusterHostInfo;
+  }
+
+  public void setClusterHostInfo(String clusterHostInfo) {
+    this.clusterHostInfo = clusterHostInfo;
+  }
+
   public synchronized void setStageId(long stageId) {
     if (this.stageId != -1) {
       throw new RuntimeException("Attempt to set stageId again! Not allowed.");

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
index fb7ac0c..4e32f2a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariActionExecutionHelper.java
@@ -295,7 +295,7 @@ public class AmbariActionExecutionHelper {
 
       // Generate cluster host info
       execCmd.setClusterHostInfo(
-          StageUtils.getClusterHostInfo(clusters.getHostsForCluster(clusterName), cluster, hostsMap, configuration));
+          StageUtils.getClusterHostInfo(clusters.getHostsForCluster(clusterName), cluster));
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
index 6165f59..0ac7e6a 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelper.java
@@ -1,37 +1,675 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.ambari.server.controller;
 
+import com.google.gson.Gson;
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.Stage;
+import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.metadata.ActionMetadata;
 import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.CommandScriptDefinition;
+import org.apache.ambari.server.state.ComponentInfo;
+import org.apache.ambari.server.state.ConfigHelper;
+import org.apache.ambari.server.state.Host;
+import org.apache.ambari.server.state.HostComponentAdminState;
+import org.apache.ambari.server.state.RepositoryInfo;
+import org.apache.ambari.server.state.Service;
+import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.ServiceComponentHost;
 import org.apache.ambari.server.state.ServiceComponentHostEvent;
+import org.apache.ambari.server.state.ServiceInfo;
+import org.apache.ambari.server.state.ServiceOsSpecific;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
+import org.apache.ambari.server.utils.StageUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.COMMAND_TIMEOUT_DEFAULT;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.CUSTOM_COMMAND;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_DRIVER_FILENAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.DB_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JAVA_HOME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JCE_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_LOCATION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.JDK_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.MYSQL_JDBC_URL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.ORACLE_JDBC_URL;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.PACKAGE_LIST;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.REPO_INFO;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCHEMA_VERSION;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TYPE;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_METADATA_FOLDER;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_REPO_INFO;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
+import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
+
+
+/**
+ * Helper class containing logic to process custom command execution requests.
+ * This class has special support needed for SERVICE_CHECK and DECOMMISSION.
+ * These commands are not pass-through, because Ambari has specific persistence requirements.
+ */
+@Singleton
+public class AmbariCustomCommandExecutionHelper {
+  private final static Logger LOG =
+      LoggerFactory.getLogger(AmbariCustomCommandExecutionHelper.class);
+  // TODO: Remove the hard-coded mapping when stack definition indicates which slave types can be decommissioned
+  private static final Map<String, String> masterToSlaveMappingForDecom = new HashMap<String, String>();
+
+  static {
+    masterToSlaveMappingForDecom.put("NAMENODE", "DATANODE");
+    masterToSlaveMappingForDecom.put("RESOURCEMANAGER", "NODEMANAGER");
+    masterToSlaveMappingForDecom.put("HBASE_MASTER", "HBASE_REGIONSERVER");
+    masterToSlaveMappingForDecom.put("JOBTRACKER", "TASKTRACKER");
+  }
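+  // For example, a DECOMMISSION request against NAMENODE resolves via this
+  // map to DATANODE as the slave type; the affected hosts then travel in the
+  // included_hosts / excluded_hosts command parameters declared below.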
+
+  private static final String DECOM_INCLUDED_HOSTS = "included_hosts";
+  private static final String DECOM_EXCLUDED_HOSTS = "excluded_hosts";
+  private static final String DECOM_SLAVE_COMPONENT = "slave_type";
+  @Inject
+  private ActionMetadata actionMetadata;
+  @Inject
+  private Clusters clusters;
+  @Inject
+  private AmbariManagementController amc;
+  @Inject
+  private Gson gson;
+  @Inject
+  private Configuration configs;
+  @Inject
+  private AmbariMetaInfo ambariMetaInfo;
+  @Inject
+  private ConfigHelper configHelper;
+
+  private Boolean isServiceCheckCommand(String command, String service) {
+    List<String> actions = actionMetadata.getActions(service);
+    return actions != null && actions.contains(command);
+  }
+
+  private Boolean isValidCustomCommand(ExecuteActionRequest actionRequest) throws AmbariException {
+    String clustername = actionRequest.getClusterName();
+    Cluster cluster = clusters.getCluster(clustername);
+    StackId stackId = cluster.getDesiredStackVersion();
+    String serviceName = actionRequest.getServiceName();
+    String componentName = actionRequest.getComponentName();
+    String commandName = actionRequest.getCommandName();
+
+    if (componentName == null) {
+      return false;
+    }
+    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
+        stackId.getStackName(), stackId.getStackVersion(),
+        serviceName, componentName);
+
+    return componentInfo.isCustomCommand(commandName) ||
+        actionMetadata.isDefaultHostComponentCommand(commandName);
+  }
+
+  public void validateCustomCommand(ExecuteActionRequest actionRequest) throws AmbariException {
+    if (actionRequest.getServiceName() == null
+        || actionRequest.getServiceName().isEmpty()
+        || actionRequest.getCommandName() == null
+        || actionRequest.getCommandName().isEmpty()) {
+      throw new AmbariException("Invalid request: " + "cluster="
+          + actionRequest.getClusterName() + ", service="
+          + actionRequest.getServiceName() + ", command="
+          + actionRequest.getCommandName());
+    }
+
+    LOG.info("Received a command execution request"
+        + ", clusterName=" + actionRequest.getClusterName()
+        + ", serviceName=" + actionRequest.getServiceName()
+        + ", request=" + actionRequest.toString());
+
+    if (!isServiceCheckCommand(actionRequest.getCommandName(), actionRequest.getServiceName())
+        && !isValidCustomCommand(actionRequest)) {
+      throw new AmbariException(
+          "Unsupported action " + actionRequest.getCommandName() + " for Service: " + actionRequest.getServiceName()
+              + " and Component: " + actionRequest.getComponentName());
+    }
+  }
+
+  /**
+ * Other than SERVICE_CHECK and DECOMMISSION, all commands are pass-through.
+   *
+   * @param actionRequest   received request to execute a command
+   * @param stage           the initial stage for task creation
+   * @param hostLevelParams specific parameters for the hosts
+   * @throws AmbariException
+   */
+  public void addAction(ExecuteActionRequest actionRequest, Stage stage,
+                        Map<String, String> hostLevelParams)
+      throws AmbariException {
+    if (actionRequest.getCommandName().contains("SERVICE_CHECK")) {
+      findHostAndAddServiceCheckAction(actionRequest, stage, hostLevelParams);
+    } else if (actionRequest.getCommandName().equals("DECOMMISSION")) {
+      addDecommissionAction(actionRequest, stage, hostLevelParams);
+    } else if (isValidCustomCommand(actionRequest)) {
+      addCustomCommandAction(actionRequest, stage, hostLevelParams);
+    } else {
+      throw new AmbariException("Unsupported action " + actionRequest.getCommandName());
+    }
+  }
+
+  private void addCustomCommandAction(ExecuteActionRequest actionRequest,
+                                      Stage stage, Map<String, String> hostLevelParams)
+      throws AmbariException {
+
+    if (actionRequest.getHosts().isEmpty()) {
+      throw new AmbariException("Invalid request: No hosts specified.");
+    }
+
+    String serviceName = actionRequest.getServiceName();
+    String componentName = actionRequest.getComponentName();
+    String commandName = actionRequest.getCommandName();
+
+    String clusterName = stage.getClusterName();
+    Cluster cluster = clusters.getCluster(clusterName);
+    StackId stackId = cluster.getDesiredStackVersion();
+    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
+    ServiceInfo serviceInfo =
+        ambariMetaInfo.getServiceInfo(stackId.getStackName(),
+            stackId.getStackVersion(), serviceName);
+
+    long nowTimestamp = System.currentTimeMillis();
+
+    for (String hostName : actionRequest.getHosts()) {
+
+      stage.addHostRoleExecutionCommand(hostName, Role.valueOf(componentName),
+          RoleCommand.CUSTOM_COMMAND,
+          new ServiceComponentHostOpInProgressEvent(componentName,
+              hostName, nowTimestamp), cluster.getClusterName(), serviceName);
+
+      Map<String, Map<String, String>> configurations =
+          new TreeMap<String, Map<String, String>>();
+      Map<String, Map<String, String>> configTags =
+          amc.findConfigurationTagsWithOverrides(cluster, hostName);
+
+      ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostName,
+          componentName).getExecutionCommand();
+
+      execCmd.setConfigurations(configurations);
+      execCmd.setConfigurationTags(configTags);
+
+      execCmd.setClusterHostInfo(
+          StageUtils.getClusterHostInfo(clusters.getHostsForCluster(clusterName), cluster));
+
+      if (hostLevelParams == null) {
+        hostLevelParams = new TreeMap<String, String>();
+      }
+      hostLevelParams.put(JDK_LOCATION, amc.getJdkResourceUrl());
+      hostLevelParams.put(JAVA_HOME, amc.getJavaHome());
+      hostLevelParams.put(JDK_NAME, amc.getJDKName());
+      hostLevelParams.put(JCE_NAME, amc.getJCEName());
+      hostLevelParams.put(STACK_NAME, stackId.getStackName());
+      hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+      hostLevelParams.put(CUSTOM_COMMAND, commandName);
+      execCmd.setHostLevelParams(hostLevelParams);
+
+      Map<String, String> commandParams = new TreeMap<String, String>();
+      commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
+
+      String commandTimeout = COMMAND_TIMEOUT_DEFAULT;
+
+      if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
+        // Service check command is not custom command
+        ComponentInfo componentInfo = ambariMetaInfo.getComponent(
+            stackId.getStackName(), stackId.getStackVersion(),
+            serviceName, componentName);
+        CommandScriptDefinition script = componentInfo.getCommandScript();
+
+        if (script != null) {
+          commandParams.put(SCRIPT, script.getScript());
+          commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
+          commandTimeout = String.valueOf(script.getTimeout());
+        } else {
+          String message = String.format("Component %s has no command script " +
+              "defined. It is not possible to run the custom command" +
+              " for this component", componentName);
+          throw new AmbariException(message);
+        }
+        // We don't need package/repo information to run the command
+      }
+      commandParams.put(COMMAND_TIMEOUT, commandTimeout);
+
+      commandParams.put(SERVICE_METADATA_FOLDER,
+          serviceInfo.getServiceMetadataFolder());
+
+      execCmd.setCommandParams(commandParams);
+
+    }
+  }
+
+  private void findHostAndAddServiceCheckAction(ExecuteActionRequest actionRequest, Stage stage,
+                                                Map<String, String> hostLevelParams)
+      throws AmbariException {
+    String clusterName = actionRequest.getClusterName();
+    String componentName = actionMetadata.getClient(actionRequest
+        .getServiceName());
+    String serviceName = actionRequest.getServiceName();
+    String smokeTestRole = actionRequest.getCommandName();
+    long nowTimestamp = System.currentTimeMillis();
+    Map<String, String> actionParameters = actionRequest.getParameters();
+
+    String hostName;
+    if (componentName != null) {
+      Map<String, ServiceComponentHost> components = clusters
+          .getCluster(clusterName).getService(actionRequest.getServiceName())
+          .getServiceComponent(componentName).getServiceComponentHosts();
+
+      if (components.isEmpty()) {
+        throw new AmbariException("Hosts not found, component="
+            + componentName + ", service=" + actionRequest.getServiceName()
+            + ", cluster=" + clusterName);
+      }
+      hostName = amc.getHealthyHost(components.keySet());
+    } else {
+      Map<String, ServiceComponent> components = clusters
+          .getCluster(clusterName).getService(actionRequest.getServiceName())
+          .getServiceComponents();
+
+      if (components.isEmpty()) {
+        throw new AmbariException("Components not found, service="
+            + actionRequest.getServiceName() + ", cluster=" + clusterName);
+      }
+
+      ServiceComponent serviceComponent = components.values().iterator()
+          .next();
+
+      if (serviceComponent.getServiceComponentHosts().isEmpty()) {
+        throw new AmbariException("Hosts not found, component="
+            + serviceComponent.getName() + ", service="
+            + actionRequest.getServiceName() + ", cluster=" + clusterName);
+      }
+
+      hostName = serviceComponent.getServiceComponentHosts().keySet()
+          .iterator().next();
+    }
+
+
+    addServiceCheckAction(stage, hostName, smokeTestRole, nowTimestamp,
+        serviceName, componentName, actionParameters,
+        hostLevelParams);
+  }
+
+  /**
+   * Creates and populates service check EXECUTION_COMMAND for host.
+   * Not all EXECUTION_COMMAND parameters are populated here because they
+   * are not needed by service check.
+   */
+  public void addServiceCheckAction(Stage stage,
+                                    String hostname, String smokeTestRole,
+                                    long nowTimestamp,
+                                    String serviceName,
+                                    String componentName,
+                                    Map<String, String> actionParameters,
+                                    Map<String, String> hostLevelParams)
+      throws AmbariException {
+
+    String clusterName = stage.getClusterName();
+    Cluster cluster = clusters.getCluster(clusterName);
+    StackId stackId = cluster.getDesiredStackVersion();
+    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
+    ServiceInfo serviceInfo =
+        ambariMetaInfo.getServiceInfo(stackId.getStackName(),
+            stackId.getStackVersion(), serviceName);
+
+
+    stage.addHostRoleExecutionCommand(hostname,
+        Role.valueOf(smokeTestRole),
+        RoleCommand.SERVICE_CHECK,
+        new ServiceComponentHostOpInProgressEvent(componentName, hostname,
+            nowTimestamp), cluster.getClusterName(), serviceName);
+
+    // [ type -> [ key, value ] ]
+    Map<String, Map<String, String>> configurations =
+        new TreeMap<String, Map<String, String>>();
+    Map<String, Map<String, String>> configTags =
+        amc.findConfigurationTagsWithOverrides(cluster, hostname);
+
+    ExecutionCommand execCmd = stage.getExecutionCommandWrapper(hostname,
+        smokeTestRole).getExecutionCommand();
+
+    execCmd.setConfigurations(configurations);
+    execCmd.setConfigurationTags(configTags);
+
+    // Generate cluster host info
+    execCmd.setClusterHostInfo(
+        StageUtils.getClusterHostInfo(clusters.getHostsForCluster(clusterName), cluster));
+
+    if (hostLevelParams == null) {
+      hostLevelParams = new TreeMap<String, String>();
+    }
+    hostLevelParams.put(JDK_LOCATION, amc.getJdkResourceUrl());
+    hostLevelParams.put(JAVA_HOME, amc.getJavaHome());
+    hostLevelParams.put(JDK_NAME, amc.getJDKName());
+    hostLevelParams.put(JCE_NAME, amc.getJCEName());
+    hostLevelParams.put(STACK_NAME, stackId.getStackName());
+    hostLevelParams.put(STACK_VERSION, stackId.getStackVersion());
+    hostLevelParams.putAll(amc.getRcaParameters());
+    execCmd.setHostLevelParams(hostLevelParams);
+
+    Map<String, String> commandParams = new TreeMap<String, String>();
+    commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
+
+    String commandTimeout = COMMAND_TIMEOUT_DEFAULT;
+
+
+    if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
+      // Service check command is not custom command
+      CommandScriptDefinition script = serviceInfo.getCommandScript();
+      if (script != null) {
+        commandParams.put(SCRIPT, script.getScript());
+        commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
+        commandTimeout = String.valueOf(script.getTimeout());
+      } else {
+        String message = String.format("Service %s has no command script " +
+            "defined. It is not possible to run service check" +
+            " for this service", serviceName);
+        throw new AmbariException(message);
+      }
+      // We don't need package/repo information to perform service check
+    }
+    commandParams.put(COMMAND_TIMEOUT, commandTimeout);
+
+    commandParams.put(SERVICE_METADATA_FOLDER,
+        serviceInfo.getServiceMetadataFolder());
+
+    execCmd.setCommandParams(commandParams);
+
+    if (actionParameters != null) { // If defined
+      execCmd.setRoleParams(actionParameters);
+    }
+
+  }
+
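+  /**
+   * Parses a comma-separated host list from the given command parameter key.
+   * Returns an empty set when the key is absent or its value is null;
+   * whitespace around each host name is trimmed.
+   */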
+  private Set<String> getHostList(Map<String, String> cmdParameters, String key) {
+    Set<String> hosts = new HashSet<String>();
+    if (cmdParameters.containsKey(key)) {
+      String allHosts = cmdParameters.get(key);
+      if (allHosts != null) {
+        for (String hostName : allHosts.trim().split(",")) {
+          hosts.add(hostName.trim());
+        }
+      }
+    }
+    return hosts;
+  }
+
+  /**
+   * Processes decommission command. Modifies the host components as needed and then
+   * calls into the implementation of a custom command
+   */
+  private void addDecommissionAction(ExecuteActionRequest request, Stage stage,
+                                     Map<String, String> hostLevelParams)
+      throws AmbariException {
+
+    String clusterName = request.getClusterName();
+    Cluster cluster = clusters.getCluster(clusterName);
+    String serviceName = request.getServiceName();
+
+    if (request.getHosts() != null && request.getHosts().size() != 0) {
+      throw new AmbariException("Decommission command cannot be issued with target host(s) specified.");
+    }
+
+    //Get all hosts to be added and removed
+    Set<String> excludedHosts = getHostList(request.getParameters(), DECOM_EXCLUDED_HOSTS);
+    Set<String> includedHosts = getHostList(request.getParameters(), DECOM_INCLUDED_HOSTS);
+    String slaveCompType = request.getParameters().get(DECOM_SLAVE_COMPONENT);
+
+    Set<String> cloneSet = new HashSet<String>(excludedHosts);
+    cloneSet.retainAll(includedHosts);
+    if (cloneSet.size() > 0) {
+      throw new AmbariException("Same host cannot be specified for inclusion as well as exclusion. Hosts: "
+          + cloneSet.toString());
+    }
+
+    Service service = cluster.getService(serviceName);
+    if (service == null) {
+      throw new AmbariException("Specified service " + serviceName + " is not a valid/deployed service.");
+    }
+
+    String masterCompType = request.getComponentName();
+    Map<String, ServiceComponent> svcComponents = service.getServiceComponents();
+    if (!svcComponents.containsKey(masterCompType)) {
+      throw new AmbariException("Specified component " + masterCompType + " does not belong to service "
+          + serviceName + ".");
+    }
+
+    ServiceComponent masterComponent = svcComponents.get(masterCompType);
+    if (!masterComponent.isMasterComponent()) {
+      throw new AmbariException("Specified component " + masterCompType + " is not a MASTER for service "
+          + serviceName + ".");
+    }
+
+    if (!masterToSlaveMappingForDecom.containsKey(masterCompType)) {
+      throw new AmbariException("Decommissioning is not supported for " + masterCompType);
+    }
+
+    // Find the slave component
+    if (slaveCompType == null || slaveCompType.equals("")) {
+      slaveCompType = masterToSlaveMappingForDecom.get(masterCompType);
+    } else if (!masterToSlaveMappingForDecom.get(masterCompType).equals(slaveCompType)) {
+      throw new AmbariException("Component " + slaveCompType + " is not supported for decommissioning.");
+    }
+
+    // Set/reset decommissioned flag on all components
+    for (ServiceComponentHost sch : svcComponents.get(slaveCompType).getServiceComponentHosts().values()) {
+      if (excludedHosts.contains(sch.getHostName())) {
+        sch.setComponentAdminState(HostComponentAdminState.DECOMMISSIONED);
+        LOG.info("Adding " + slaveCompType + " host to decommissioned list : " + sch.getHostName());
+      }
+      if (includedHosts.contains(sch.getHostName())) {
+        sch.setComponentAdminState(HostComponentAdminState.INSERVICE);
+        LOG.info("Removing " + slaveCompType + " host from the decommissioned list: " + sch.getHostName());
+      }
+    }
+
+    Set<String> masterHosts = masterComponent.getServiceComponentHosts().keySet();
+    ExecuteActionRequest commandRequest = new ExecuteActionRequest(
+        request.getClusterName(), request.getCommandName(), request.getActionName(), request.getServiceName(),
+        masterComponent.getName(), new ArrayList<String>(masterHosts), null);
+
+    String clusterHostInfoJson = StageUtils.getGson().toJson(
+        StageUtils.getClusterHostInfo(clusters.getHostsForCluster(cluster.getClusterName()), cluster));
+
+    // Reset cluster host info as it has changed
+    stage.setClusterHostInfo(clusterHostInfoJson);
+
+    addCustomCommandAction(commandRequest, stage, hostLevelParams);
+  }
+
+  /**
+   * Creates and populates an EXECUTION_COMMAND for host
+   */
+  public void createHostAction(Cluster cluster,
+                               Stage stage, ServiceComponentHost scHost,
+                               Map<String, Map<String, String>> configurations,
+                               Map<String, Map<String, String>> configTags,
+                               RoleCommand roleCommand,
+                               Map<String, String> commandParams,
+                               ServiceComponentHostEvent event)
+      throws AmbariException {
+
+    stage.addHostRoleExecutionCommand(scHost.getHostName(), Role.valueOf(scHost
+        .getServiceComponentName()), roleCommand,
+        event, scHost.getClusterName(),
+        scHost.getServiceName());
+    String serviceName = scHost.getServiceName();
+    String componentName = event.getServiceComponentName();
+    String hostname = scHost.getHostName();
+    String osType = clusters.getHost(hostname).getOsType();
+    StackId stackId = cluster.getDesiredStackVersion();
+    ServiceInfo serviceInfo = ambariMetaInfo.getServiceInfo(stackId.getStackName(),
+        stackId.getStackVersion(), serviceName);
+    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
+        stackId.getStackName(), stackId.getStackVersion(),
+        serviceName, componentName);
+
+    ExecutionCommand execCmd = stage.getExecutionCommandWrapper(scHost.getHostName(),
+        scHost.getServiceComponentName()).getExecutionCommand();
+
+    Host host = clusters.getHost(scHost.getHostName());
+
+    // Hack - Remove passwords from configs
+    if (event.getServiceComponentName().equals(Role.HIVE_CLIENT.toString())) {
+      configHelper.applyCustomConfig(configurations, Configuration.HIVE_CONFIG_TAG,
+          Configuration.HIVE_METASTORE_PASSWORD_PROPERTY, "", true);
+    }
+
+    execCmd.setConfigurations(configurations);
+    execCmd.setConfigurationTags(configTags);
+    if (commandParams == null) { // if not defined
+      commandParams = new TreeMap<String, String>();
+    }
+    commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
+
+
+    // Get command script info for custom command/custom action
+    /*
+     * TODO: Custom actions are not supported yet, that's why we just pass
+     * the component's main commandScript to the agent. This script is only
+     * used for default commands like INSTALL/STOP/START/CONFIGURE
+     */
+    String commandTimeout = ExecutionCommand.KeyNames.COMMAND_TIMEOUT_DEFAULT;
+    CommandScriptDefinition script = componentInfo.getCommandScript();
+    if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
+      if (script != null) {
+        commandParams.put(SCRIPT, script.getScript());
+        commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
+        commandTimeout = String.valueOf(script.getTimeout());
+      } else {
+        String message = String.format("Component %s of service %s has no " +
+            "command script defined", componentName, serviceName);
+        throw new AmbariException(message);
+      }
+    }
+    commandParams.put(COMMAND_TIMEOUT, commandTimeout);
+    commandParams.put(SERVICE_METADATA_FOLDER,
+        serviceInfo.getServiceMetadataFolder());
+
+    execCmd.setCommandParams(commandParams);
+
+    Map<String, List<RepositoryInfo>> repos = ambariMetaInfo.getRepository(
+        stackId.getStackName(), stackId.getStackVersion());
+    String repoInfo = "";
+    if (!repos.containsKey(host.getOsType())) {
+      // FIXME should this be an error?
+      LOG.warn("Could not retrieve repo information for host"
+          + ", hostname=" + scHost.getHostName()
+          + ", clusterName=" + cluster.getClusterName()
+          + ", stackInfo=" + stackId.getStackId());
+    } else {
+      repoInfo = gson.toJson(repos.get(host.getOsType()));
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Sending repo information to agent"
+          + ", hostname=" + scHost.getHostName()
+          + ", clusterName=" + cluster.getClusterName()
+          + ", stackInfo=" + stackId.getStackId()
+          + ", repoInfo=" + repoInfo);
+    }
+
+    Map<String, String> hostParams = new TreeMap<String, String>();
+    // TODO: Move parameter population to org.apache.ambari.server.controller.AmbariManagementControllerImpl.createAction()
+    hostParams.put(REPO_INFO, repoInfo);
+    hostParams.put(JDK_LOCATION, amc.getJdkResourceUrl());
+    hostParams.put(JAVA_HOME, amc.getJavaHome());
+    hostParams.put(JDK_NAME, amc.getJDKName());
+    hostParams.put(JCE_NAME, amc.getJCEName());
+    hostParams.put(STACK_NAME, stackId.getStackName());
+    hostParams.put(STACK_VERSION, stackId.getStackVersion());
+    hostParams.put(DB_NAME, amc.getServerDB());
+    hostParams.put(MYSQL_JDBC_URL, amc.getMysqljdbcUrl());
+    hostParams.put(ORACLE_JDBC_URL, amc.getOjdbcUrl());
+    hostParams.putAll(amc.getRcaParameters());
+
+    // Write down os specific info for the service
+    ServiceOsSpecific anyOs = null;
+    if (serviceInfo.getOsSpecifics().containsKey(AmbariMetaInfo.ANY_OS)) {
+      anyOs = serviceInfo.getOsSpecifics().get(AmbariMetaInfo.ANY_OS);
+    }
+    ServiceOsSpecific hostOs = null;
+    if (serviceInfo.getOsSpecifics().containsKey(osType)) {
+      hostOs = serviceInfo.getOsSpecifics().get(osType);
+      // Choose repo that is relevant for host
+      ServiceOsSpecific.Repo serviceRepo = hostOs.getRepo();
+      if (serviceRepo != null) {
+        String serviceRepoInfo = gson.toJson(serviceRepo);
+        hostParams.put(SERVICE_REPO_INFO, serviceRepoInfo);
+      }
+    }
+    // Build package list that is relevant for host
+    List<ServiceOsSpecific.Package> packages =
+        new ArrayList<ServiceOsSpecific.Package>();
+    if (anyOs != null) {
+      packages.addAll(anyOs.getPackages());
+    }
+
+    if (hostOs != null) {
+      packages.addAll(hostOs.getPackages());
+    }
+    String packageList = gson.toJson(packages);
+    hostParams.put(PACKAGE_LIST, packageList);
+
+    if (configs.getServerDBName().equalsIgnoreCase(Configuration
+        .ORACLE_DB_NAME)) {
+      hostParams.put(DB_DRIVER_FILENAME, configs.getOjdbcJarName());
+    } else if (configs.getServerDBName().equalsIgnoreCase(Configuration
+        .MYSQL_DB_NAME)) {
+      hostParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
+    }
+    execCmd.setHostLevelParams(hostParams);
 
-public interface AmbariCustomCommandExecutionHelper {
-  void validateCustomCommand(ExecuteActionRequest actionRequest) throws AmbariException;
-
-  void addAction(ExecuteActionRequest actionRequest, Stage stage,
-                 HostsMap hostsMap, Map<String, String> hostLevelParams)
-      throws AmbariException;
-
-  void addServiceCheckActionImpl(Stage stage,
-                                 String hostname, String smokeTestRole,
-                                 long nowTimestamp,
-                                 String serviceName,
-                                 String componentName,
-                                 Map<String, String> roleParameters,
-                                 HostsMap hostsMap,
-                                 Map<String, String> hostLevelParams)
-              throws AmbariException;
-
-  void createHostAction(Cluster cluster,
-                        Stage stage, ServiceComponentHost scHost,
-                        Map<String, Map<String, String>> configurations,
-                        Map<String, Map<String, String>> configTags,
-                        RoleCommand roleCommand,
-                        Map<String, String> commandParams,
-                        ServiceComponentHostEvent event)
-                      throws AmbariException;
+    Map<String, String> roleParams = new TreeMap<String, String>();
+    execCmd.setRoleParams(roleParams);
+  }
 }
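The decommission path above flips slave hosts to DECOMMISSIONED/INSERVICE and then dispatches the custom command to the master hosts. A hedged fragment showing how such a request might be assembled against the ExecuteActionRequest constructor used in addDecommissionAction; the literal parameter keys and the JOBTRACKER-to-TASKTRACKER pairing are assumptions, since only the DECOM_* constant names and the mapping lookup are visible in this patch:

    // Fragment, not a definitive API example: assumes Ambari server classes on
    // the classpath and hypothetical literal keys behind the DECOM_* constants.
    Map<String, String> params = new TreeMap<String, String>();
    params.put("excluded_hosts", "slave1.example.com,slave2.example.com"); // DECOM_EXCLUDED_HOSTS
    params.put("slave_type", "TASKTRACKER");                               // DECOM_SLAVE_COMPONENT
    ExecuteActionRequest decom = new ExecuteActionRequest(
        "c1",            // cluster name
        "DECOMMISSION",  // command name (assumed)
        null,            // action name
        "MAPREDUCE",     // service name
        "JOBTRACKER",    // master component owning the slaves
        null,            // target hosts must NOT be set for decommission
        params);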

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperImpl.java
deleted file mode 100644
index 8c5d733..0000000
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariCustomCommandExecutionHelperImpl.java
+++ /dev/null
@@ -1,593 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ambari.server.controller;
-
-import com.google.gson.Gson;
-import com.google.inject.Inject;
-import com.google.inject.Singleton;
-import org.apache.ambari.server.AmbariException;
-import org.apache.ambari.server.Role;
-import org.apache.ambari.server.RoleCommand;
-import org.apache.ambari.server.actionmanager.Stage;
-import org.apache.ambari.server.agent.ExecutionCommand;
-import org.apache.ambari.server.api.services.AmbariMetaInfo;
-import org.apache.ambari.server.configuration.Configuration;
-import org.apache.ambari.server.metadata.ActionMetadata;
-import org.apache.ambari.server.state.*;
-import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpInProgressEvent;
-import org.apache.ambari.server.utils.StageUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.*;
-
-import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.*;
-
-
-/**
- * Helper class containing logic to process custom command execution requests
- */
-@Singleton
-public class AmbariCustomCommandExecutionHelperImpl implements AmbariCustomCommandExecutionHelper {
-  private final static Logger LOG =
-      LoggerFactory.getLogger(AmbariCustomCommandExecutionHelperImpl.class);
-
-  @Inject
-  private ActionMetadata actionMetadata;
-  @Inject
-  private Clusters clusters;
-  @Inject
-  private AmbariManagementController amc;
-  @Inject
-  private Gson gson;
-  @Inject
-  private Configuration configs;
-  @Inject
-  private AmbariMetaInfo ambariMetaInfo;
-  @Inject
-  private ConfigHelper configHelper;
-
-
-  @Override
-  public void validateCustomCommand(ExecuteActionRequest actionRequest) throws AmbariException {
-    if (actionRequest.getServiceName() == null
-        || actionRequest.getServiceName().isEmpty()
-        || actionRequest.getCommandName() == null
-        || actionRequest.getCommandName().isEmpty()) {
-      throw new AmbariException("Invalid request : " + "cluster="
-          + actionRequest.getClusterName() + ", service="
-          + actionRequest.getServiceName() + ", command="
-          + actionRequest.getCommandName());
-    }
-
-    LOG.info("Received a command execution request"
-        + ", clusterName=" + actionRequest.getClusterName()
-        + ", serviceName=" + actionRequest.getServiceName()
-        + ", request=" + actionRequest.toString());
-
-    if (!isValidCommand(actionRequest.getCommandName(),
-      actionRequest.getServiceName()) && !isValidCustomCommand(actionRequest)) {
-      throw new AmbariException(
-          "Unsupported action " + actionRequest.getCommandName() + " for " + actionRequest.getServiceName());
-    }
-  }
-
-  private Boolean isValidCommand(String command, String service) {
-    List<String> actions = actionMetadata.getActions(service);
-    if (actions == null || actions.size() == 0) {
-      return false;
-    }
-
-    if (!actions.contains(command)) {
-      return false;
-    }
-
-    return true;
-  }
-
-  private Boolean isValidCustomCommand(ExecuteActionRequest actionRequest) throws AmbariException {
-    String clustername = actionRequest.getClusterName();
-    Cluster cluster = clusters.getCluster(clustername);
-    StackId stackId = cluster.getDesiredStackVersion();
-    String serviceName = actionRequest.getServiceName();
-    String componentName = actionRequest.getComponentName();
-    String commandName = actionRequest.getCommandName();
-
-    if (componentName == null) {
-      return false;
-    }
-    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
-      stackId.getStackName(), stackId.getStackVersion(),
-      serviceName, componentName);
-
-    if (!componentInfo.isCustomCommand(commandName) &&
-      !actionMetadata.isDefaultHostComponentCommand(commandName)) {
-      return false;
-    }
-    return true;
-  }
-
-  @Override
-  public void addAction(ExecuteActionRequest actionRequest, Stage stage,
-                        HostsMap hostsMap, Map<String, String> hostLevelParams)
-      throws AmbariException {
-    if (actionRequest.getCommandName().contains("SERVICE_CHECK")) {
-      addServiceCheckAction(actionRequest, stage, hostsMap, hostLevelParams);
-    } else if (actionRequest.getCommandName().equals("DECOMMISSION_DATANODE")) {
-      addDecommissionDatanodeAction(actionRequest, stage, hostLevelParams);
-    } else if (isValidCustomCommand(actionRequest)) {
-      addCustomcommandAction(actionRequest, stage, hostsMap, hostLevelParams);
-    } else {
-      throw new AmbariException("Unsupported action " + actionRequest.getCommandName());
-    }
-  }
-
-  private void addCustomcommandAction(ExecuteActionRequest actionRequest,
-    Stage stage, HostsMap hostsMap, Map<String, String> hostLevelParams)
-    throws AmbariException {
-
-    if (actionRequest.getHosts().isEmpty()) {
-      throw new AmbariException("Invalid request : No hosts specified.");
-    }
-
-    String serviceName = actionRequest.getServiceName();
-    String componentName = actionRequest.getComponentName();
-    String commandName = actionRequest.getCommandName();
-
-    String clusterName = stage.getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
-    StackId stackId = cluster.getDesiredStackVersion();
-    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
-    ServiceInfo serviceInfo =
-      ambariMetaInfo.getServiceInfo(stackId.getStackName(),
-        stackId.getStackVersion(), serviceName);
-
-    long nowTimestamp = System.currentTimeMillis();
-
-    for (String hostName: actionRequest.getHosts()) {
-
-      stage.addHostRoleExecutionCommand(hostName, Role.valueOf(componentName),
-        RoleCommand.CUSTOM_COMMAND,
-        new ServiceComponentHostOpInProgressEvent(componentName,
-        hostName, nowTimestamp), cluster.getClusterName(), serviceName);
-
-      Map<String, Map<String, String>> configurations =
-        new TreeMap<String, Map<String, String>>();
-      Map<String, Map<String, String>> configTags =
-        amc.findConfigurationTagsWithOverrides(cluster, hostName);
-
-      ExecutionCommand execCmd =  stage.getExecutionCommandWrapper(hostName,
-        componentName).getExecutionCommand();
-
-      execCmd.setConfigurations(configurations);
-      execCmd.setConfigurationTags(configTags);
-
-      execCmd.setClusterHostInfo(
-        StageUtils.getClusterHostInfo(clusters.getHostsForCluster(clusterName), cluster, hostsMap, configs));
-
-      if (hostLevelParams == null) {
-        hostLevelParams = new TreeMap<String, String>();
-      }
-      hostLevelParams.put(JDK_LOCATION, amc.getJdkResourceUrl());
-      hostLevelParams.put(JAVA_HOME, amc.getJavaHome());
-      hostLevelParams.put(JDK_NAME, amc.getJDKName());
-      hostLevelParams.put(JCE_NAME, amc.getJCEName());
-      hostLevelParams.put(STACK_NAME, stackId.getStackName());
-      hostLevelParams.put(STACK_VERSION,stackId.getStackVersion());
-      hostLevelParams.put(CUSTOM_COMMAND, commandName);
-      execCmd.setHostLevelParams(hostLevelParams);
-
-      Map<String,String> commandParams = new TreeMap<String, String>();
-      commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
-
-      String commandTimeout = COMMAND_TIMEOUT_DEFAULT;
-
-      if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
-        // Service check command is not custom command
-        ComponentInfo componentInfo = ambariMetaInfo.getComponent(
-          stackId.getStackName(), stackId.getStackVersion(),
-          serviceName, componentName);
-        CommandScriptDefinition script = componentInfo.getCommandScript();
-
-        if (script != null) {
-          commandParams.put(SCRIPT, script.getScript());
-          commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
-          commandTimeout = String.valueOf(script.getTimeout());
-        } else {
-          String message = String.format("Component %s has not command script " +
-            "defined. It is not possible to run service check" +
-            " for this service", componentName);
-          throw new AmbariException(message);
-        }
-        // We don't need package/repo infomation to perform service check
-      }
-      commandParams.put(COMMAND_TIMEOUT, commandTimeout);
-
-      commandParams.put(SERVICE_METADATA_FOLDER,
-        serviceInfo.getServiceMetadataFolder());
-
-      execCmd.setCommandParams(commandParams);
-
-    }
-  }
-
-  private void addServiceCheckAction(ExecuteActionRequest actionRequest, Stage stage,
-                                     HostsMap hostsMap,
-                                     Map<String, String> hostLevelParams)
-      throws AmbariException {
-    String clusterName = actionRequest.getClusterName();
-    String componentName = actionMetadata.getClient(actionRequest
-        .getServiceName());
-    String serviceName = actionRequest.getServiceName();
-    String smokeTestRole = actionRequest.getCommandName();
-    long nowTimestamp = System.currentTimeMillis();
-    Map<String, String> roleParameters = actionRequest.getParameters();
-
-    String hostName;
-    if (componentName != null) {
-      Map<String, ServiceComponentHost> components = clusters
-          .getCluster(clusterName).getService(actionRequest.getServiceName())
-          .getServiceComponent(componentName).getServiceComponentHosts();
-
-      if (components.isEmpty()) {
-        throw new AmbariException("Hosts not found, component="
-            + componentName + ", service=" + actionRequest.getServiceName()
-            + ", cluster=" + clusterName);
-      }
-      hostName = amc.getHealthyHost(components.keySet());
-    } else {
-      Map<String, ServiceComponent> components = clusters
-          .getCluster(clusterName).getService(actionRequest.getServiceName())
-          .getServiceComponents();
-
-      if (components.isEmpty()) {
-        throw new AmbariException("Components not found, service="
-            + actionRequest.getServiceName() + ", cluster=" + clusterName);
-      }
-
-      ServiceComponent serviceComponent = components.values().iterator()
-          .next();
-
-      if (serviceComponent.getServiceComponentHosts().isEmpty()) {
-        throw new AmbariException("Hosts not found, component="
-            + serviceComponent.getName() + ", service="
-            + actionRequest.getServiceName() + ", cluster=" + clusterName);
-      }
-
-      hostName = serviceComponent.getServiceComponentHosts().keySet()
-          .iterator().next();
-    }
-
-
-    addServiceCheckActionImpl(stage, hostName, smokeTestRole, nowTimestamp,
-            serviceName, componentName, roleParameters, hostsMap,
-            hostLevelParams);
-  }
-
-
-
-  /**
-   * Creates and populates service check EXECUTION_COMMAND for host.
-   * Not all EXECUTION_COMMAND parameters are populated here because they
-   * are not needed by service check.
-   */
-  @Override
-  public void addServiceCheckActionImpl(Stage stage,
-                                        String hostname, String smokeTestRole,
-                                        long nowTimestamp,
-                                        String serviceName,
-                                        String componentName,
-                                        Map<String, String> roleParameters,
-                                        HostsMap hostsMap,
-                                        Map<String, String> hostLevelParams)
-          throws AmbariException{
-
-    String clusterName = stage.getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
-    StackId stackId = cluster.getDesiredStackVersion();
-    AmbariMetaInfo ambariMetaInfo = amc.getAmbariMetaInfo();
-    ServiceInfo serviceInfo =
-            ambariMetaInfo.getServiceInfo(stackId.getStackName(),
-              stackId.getStackVersion(), serviceName);
-
-
-    stage.addHostRoleExecutionCommand(hostname,
-            Role.valueOf(smokeTestRole),
-            RoleCommand.SERVICE_CHECK,
-            new ServiceComponentHostOpInProgressEvent(componentName, hostname,
-                    nowTimestamp), cluster.getClusterName(), serviceName);
-
-    // [ type -> [ key, value ] ]
-    Map<String, Map<String, String>> configurations =
-            new TreeMap<String, Map<String, String>>();
-    Map<String, Map<String, String>> configTags =
-            amc.findConfigurationTagsWithOverrides(cluster, hostname);
-
-    ExecutionCommand execCmd =  stage.getExecutionCommandWrapper(hostname,
-            smokeTestRole).getExecutionCommand();
-
-    execCmd.setConfigurations(configurations);
-    execCmd.setConfigurationTags(configTags);
-
-    // Generate cluster host info
-    execCmd.setClusterHostInfo(
-            StageUtils.getClusterHostInfo(clusters.getHostsForCluster(clusterName), cluster, hostsMap, configs));
-
-    if (hostLevelParams == null) {
-      hostLevelParams = new TreeMap<String, String>();
-    }
-    hostLevelParams.put(JDK_LOCATION, amc.getJdkResourceUrl());
-    hostLevelParams.put(JAVA_HOME, amc.getJavaHome());
-    hostLevelParams.put(JDK_NAME, amc.getJDKName());
-    hostLevelParams.put(JCE_NAME, amc.getJCEName());
-    hostLevelParams.put(STACK_NAME, stackId.getStackName());
-    hostLevelParams.put(STACK_VERSION,stackId.getStackVersion());
-    hostLevelParams.putAll(amc.getRcaParameters());
-    execCmd.setHostLevelParams(hostLevelParams);
-
-    Map<String,String> commandParams = new TreeMap<String, String>();
-    commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
-
-    String commandTimeout = COMMAND_TIMEOUT_DEFAULT;
-
-
-    if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
-      // Service check command is not custom command
-      CommandScriptDefinition script = serviceInfo.getCommandScript();
-      if (script != null) {
-        commandParams.put(SCRIPT, script.getScript());
-        commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
-        commandTimeout = String.valueOf(script.getTimeout());
-      } else {
-        String message = String.format("Service %s has no command script " +
-                "defined. It is not possible to run service check" +
-                " for this service", serviceName);
-        throw new AmbariException(message);
-      }
-      // We don't need package/repo infomation to perform service check
-    }
-    commandParams.put(COMMAND_TIMEOUT, commandTimeout);
-
-    commandParams.put(SERVICE_METADATA_FOLDER,
-            serviceInfo.getServiceMetadataFolder());
-
-    execCmd.setCommandParams(commandParams);
-
-    if (roleParameters != null) { // If defined
-      execCmd.setRoleParams(roleParameters);
-    }
-
-  }
-
-  private void addDecommissionDatanodeAction(ExecuteActionRequest decommissionRequest, Stage stage,
-                                             Map<String, String> hostLevelParams)
-      throws AmbariException {
-    String hdfsExcludeFileType = "hdfs-exclude-file";
-    // Find hdfs admin host, just decommission from namenode.
-    String clusterName = decommissionRequest.getClusterName();
-    Cluster cluster = clusters.getCluster(clusterName);
-    String serviceName = decommissionRequest.getServiceName();
-    String namenodeHost = clusters.getCluster(clusterName)
-        .getService(serviceName).getServiceComponent(Role.NAMENODE.toString())
-        .getServiceComponentHosts().keySet().iterator().next();
-
-    String excludeFileTag = null;
-    if (decommissionRequest.getParameters() != null
-        && (decommissionRequest.getParameters().get("excludeFileTag") != null)) {
-      excludeFileTag = decommissionRequest.getParameters()
-          .get("excludeFileTag");
-    }
-
-    if (excludeFileTag == null) {
-      throw new AmbariException("No exclude file specified"
-          + " when decommissioning datanodes. Provide parameter excludeFileTag with the tag for config type "
-          + hdfsExcludeFileType);
-    }
-
-    Config config = clusters.getCluster(clusterName).getConfig(
-        hdfsExcludeFileType, excludeFileTag);
-    if (config == null) {
-      throw new AmbariException("Decommissioning datanodes requires the cluster to be associated with config type " +
-          hdfsExcludeFileType + " with a list of datanodes to be decommissioned (\"datanodes\" : list).");
-    }
-
-    LOG.info("Decommissioning data nodes: " + config.getProperties().get("datanodes") +
-        " " + hdfsExcludeFileType + " tag: " + excludeFileTag);
-
-    Map<String, Map<String, String>> configurations =
-        new TreeMap<String, Map<String, String>>();
-
-
-    Map<String, Map<String, String>> configTags = amc.findConfigurationTagsWithOverrides(cluster, namenodeHost);
-
-    // Add the tag for hdfs-exclude-file
-    Map<String, String> excludeTags = new HashMap<String, String>();
-    excludeTags.put(ConfigHelper.CLUSTER_DEFAULT_TAG, config.getVersionTag());
-    configTags.put(hdfsExcludeFileType, excludeTags);
-
-    stage.addHostRoleExecutionCommand(
-        namenodeHost,
-        Role.DECOMMISSION_DATANODE,
-        RoleCommand.EXECUTE,
-        new ServiceComponentHostOpInProgressEvent(Role.DECOMMISSION_DATANODE
-            .toString(), namenodeHost, System.currentTimeMillis()),
-        clusterName, serviceName);
-
-    ExecutionCommand execCmd = stage.getExecutionCommandWrapper(namenodeHost,
-        Role.DECOMMISSION_DATANODE.toString()).getExecutionCommand();
-
-    execCmd.setConfigurations(configurations);
-    execCmd.setConfigurationTags(configTags);
-    /*
-     TODO: When migrating to custom services, datanode decommision
-     probably will be implemented as a custom action; that's why
-     we have no schema version 2 command parameters here
-    */
-    execCmd.setHostLevelParams(hostLevelParams);
-  }
-
-
-  /**
-   * Creates and populates an EXECUTION_COMMAND for host
-   */
-  @Override
-  public void createHostAction(Cluster cluster,
-                               Stage stage, ServiceComponentHost scHost,
-                               Map<String, Map<String, String>> configurations,
-                               Map<String, Map<String, String>> configTags,
-                               RoleCommand roleCommand,
-                               Map<String, String> commandParams,
-                               ServiceComponentHostEvent event)
-          throws AmbariException {
-
-    stage.addHostRoleExecutionCommand(scHost.getHostName(), Role.valueOf(scHost
-            .getServiceComponentName()), roleCommand,
-            event, scHost.getClusterName(),
-            scHost.getServiceName());
-    String serviceName = scHost.getServiceName();
-    String componentName = event.getServiceComponentName();
-    String hostname = scHost.getHostName();
-    String osType = clusters.getHost(hostname).getOsType();
-    StackId stackId = cluster.getDesiredStackVersion();
-    ServiceInfo serviceInfo = ambariMetaInfo.getServiceInfo(stackId.getStackName(),
-            stackId.getStackVersion(), serviceName);
-    ComponentInfo componentInfo = ambariMetaInfo.getComponent(
-            stackId.getStackName(), stackId.getStackVersion(),
-            serviceName, componentName);
-
-    ExecutionCommand execCmd = stage.getExecutionCommandWrapper(scHost.getHostName(),
-            scHost.getServiceComponentName()).getExecutionCommand();
-
-    Host host = clusters.getHost(scHost.getHostName());
-
-    // Hack - Remove passwords from configs
-    if (event.getServiceComponentName().equals(Role.HIVE_CLIENT.toString())) {
-      configHelper.applyCustomConfig(configurations, Configuration.HIVE_CONFIG_TAG,
-              Configuration.HIVE_METASTORE_PASSWORD_PROPERTY, "", true);
-    }
-
-    execCmd.setConfigurations(configurations);
-    execCmd.setConfigurationTags(configTags);
-    if (commandParams == null) { // if not defined
-      commandParams = new TreeMap<String, String>();
-    }
-    commandParams.put(SCHEMA_VERSION, serviceInfo.getSchemaVersion());
-
-
-    // Get command script info for custom command/custom action
-    /*
-     * TODO: Custom actions are not supported yet, that's why we just pass
-     * component main commandScript to agent. This script is only used for
-     * default commads like INSTALL/STOP/START/CONFIGURE
-     */
-    String commandTimeout = ExecutionCommand.KeyNames.COMMAND_TIMEOUT_DEFAULT;
-    CommandScriptDefinition script = componentInfo.getCommandScript();
-    if (serviceInfo.getSchemaVersion().equals(AmbariMetaInfo.SCHEMA_VERSION_2)) {
-      if (script != null) {
-        commandParams.put(SCRIPT, script.getScript());
-        commandParams.put(SCRIPT_TYPE, script.getScriptType().toString());
-        commandTimeout = String.valueOf(script.getTimeout());
-      } else {
-        String message = String.format("Component %s of service %s has not " +
-                "command script defined", componentName, serviceName);
-        throw new AmbariException(message);
-      }
-    }
-    commandParams.put(COMMAND_TIMEOUT, commandTimeout);
-    commandParams.put(SERVICE_METADATA_FOLDER,
-            serviceInfo.getServiceMetadataFolder());
-
-    execCmd.setCommandParams(commandParams);
-
-    Map<String, List<RepositoryInfo>> repos = ambariMetaInfo.getRepository(
-            stackId.getStackName(), stackId.getStackVersion());
-    String repoInfo = "";
-    if (!repos.containsKey(host.getOsType())) {
-      // FIXME should this be an error?
-      LOG.warn("Could not retrieve repo information for host"
-              + ", hostname=" + scHost.getHostName()
-              + ", clusterName=" + cluster.getClusterName()
-              + ", stackInfo=" + stackId.getStackId());
-    } else {
-      repoInfo = gson.toJson(repos.get(host.getOsType()));
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Sending repo information to agent"
-              + ", hostname=" + scHost.getHostName()
-              + ", clusterName=" + cluster.getClusterName()
-              + ", stackInfo=" + stackId.getStackId()
-              + ", repoInfo=" + repoInfo);
-    }
-
-    Map<String, String> hostParams = new TreeMap<String, String>();
-    // TODO: Move parameter population to org.apache.ambari.server.controller.AmbariManagementControllerImpl.createAction()
-    hostParams.put(REPO_INFO, repoInfo);
-    hostParams.put(JDK_LOCATION, amc.getJdkResourceUrl());
-    hostParams.put(JAVA_HOME, amc.getJavaHome());
-    hostParams.put(JDK_NAME, amc.getJDKName());
-    hostParams.put(JCE_NAME, amc.getJCEName());
-    hostParams.put(STACK_NAME, stackId.getStackName());
-    hostParams.put(STACK_VERSION, stackId.getStackVersion());
-    hostParams.put(DB_NAME, amc.getServerDB());
-    hostParams.put(MYSQL_JDBC_URL, amc.getMysqljdbcUrl());
-    hostParams.put(ORACLE_JDBC_URL, amc.getOjdbcUrl());
-    hostParams.putAll(amc.getRcaParameters());
-
-    // Write down os specific info for the service
-    ServiceOsSpecific anyOs = null;
-    if (serviceInfo.getOsSpecifics().containsKey(AmbariMetaInfo.ANY_OS)) {
-      anyOs = serviceInfo.getOsSpecifics().get(AmbariMetaInfo.ANY_OS);
-    }
-    ServiceOsSpecific hostOs = null;
-    if (serviceInfo.getOsSpecifics().containsKey(osType)) {
-      hostOs = serviceInfo.getOsSpecifics().get(osType);
-      // Choose repo that is relevant for host
-      ServiceOsSpecific.Repo serviceRepo= hostOs.getRepo();
-      if (serviceRepo != null) {
-        String serviceRepoInfo = gson.toJson(serviceRepo);
-        hostParams.put(SERVICE_REPO_INFO, serviceRepoInfo);
-      }
-    }
-    // Build package list that is relevant for host
-    List<ServiceOsSpecific.Package> packages =
-            new ArrayList<ServiceOsSpecific.Package>();
-    if (anyOs != null) {
-      packages.addAll(anyOs.getPackages());
-    }
-
-    if (hostOs != null) {
-      packages.addAll(hostOs.getPackages());
-    }
-    String packageList = gson.toJson(packages);
-    hostParams.put(PACKAGE_LIST, packageList);
-
-    if (configs.getServerDBName().equalsIgnoreCase(Configuration
-            .ORACLE_DB_NAME)) {
-      hostParams.put(DB_DRIVER_FILENAME, configs.getOjdbcJarName());
-    } else if (configs.getServerDBName().equalsIgnoreCase(Configuration
-            .MYSQL_DB_NAME)) {
-      hostParams.put(DB_DRIVER_FILENAME, configs.getMySQLJarName());
-    }
-    execCmd.setHostLevelParams(hostParams);
-
-    Map<String, String> roleParams = new TreeMap<String, String>();
-    execCmd.setRoleParams(roleParams);
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
index afff8e4..570e574 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
@@ -457,8 +457,7 @@ public class AmbariManagementControllerImpl implements
       boolean isClient = compInfo.isClient();
 
       ServiceComponentHost sch =
-          serviceComponentHostFactory.createNew(sc, request.getHostname(),
-              isClient);
+          serviceComponentHostFactory.createNew(sc, request.getHostname());
 
       if (request.getDesiredState() != null
           && !request.getDesiredState().isEmpty()) {
@@ -538,7 +537,10 @@ public class AmbariManagementControllerImpl implements
 
   private Stage createNewStage(Cluster cluster, long requestId, String requestContext, String clusterHostInfo) {
     String logDir = BASE_LOG_DIR + File.pathSeparator + requestId;
-    return stageFactory.createNew(requestId, logDir, cluster.getClusterName(), requestContext, clusterHostInfo);
+    Stage stage =
+        stageFactory.createNew(requestId, logDir, cluster.getClusterName(), requestContext, clusterHostInfo);
+    stage.setStageId(0);
+    return stage;
   }
 
 
@@ -691,6 +693,14 @@ public class AmbariManagementControllerImpl implements
                 && (desiredStateToCheck != sch.getDesiredState())) {
               continue;
             }
+            if (request.getAdminState() != null) {
+              String stringToMatch =
+                  sch.getComponentAdminState() == null ? "" : sch.getComponentAdminState().name();
+              if (!request.getAdminState().equals(stringToMatch)) {
+                continue;
+              }
+            }
+
             ServiceComponentHostResponse r = sch.convertToResponse();
             if (filterBasedConfigStaleness && r.isStaleConfig() != staleConfig) {
               continue;
@@ -714,6 +724,14 @@ public class AmbariManagementControllerImpl implements
               continue;
             }
 
+            if (request.getAdminState() != null) {
+              String stringToMatch =
+                  sch.getComponentAdminState() == null ? "" : sch.getComponentAdminState().name();
+              if (!request.getAdminState().equals(stringToMatch)) {
+                continue;
+              }
+            }
+
             ServiceComponentHostResponse r = sch.convertToResponse();
             if (filterBasedConfigStaleness && r.isStaleConfig() != staleConfig) {
               continue;
@@ -1082,15 +1100,12 @@ public class AmbariManagementControllerImpl implements
 
       // FIXME cannot work with a single stage
       // multiple stages may be needed for reconfigure
-      long stageId = 0;
       Map<String, Set<String>> clusterHostInfo = StageUtils.getClusterHostInfo(
-          clusters.getHostsForCluster(cluster.getClusterName()), cluster, hostsMap, injector.getInstance(Configuration.class));
-      
-      
+          clusters.getHostsForCluster(cluster.getClusterName()), cluster);
+
       String clusterHostInfoJson = StageUtils.getGson().toJson(clusterHostInfo);
       
       Stage stage = createNewStage(cluster, requestId, requestContext, clusterHostInfoJson);
-      stage.setStageId(stageId);
 
       //HACK
       String jobtrackerHost = getJobTrackerHost(cluster);
@@ -1263,9 +1278,9 @@ public class AmbariManagementControllerImpl implements
           continue;
         }
         Configuration configuration = injector.getInstance(Configuration.class);
-        customCommandExecutionHelper.addServiceCheckActionImpl(stage, clientHost,
-                smokeTestRole, nowTimestamp, serviceName,
-                null, null, hostsMap, null);
+        customCommandExecutionHelper.addServiceCheckAction(stage, clientHost,
+            smokeTestRole, nowTimestamp, serviceName,
+            null, null, null);
 
       }
 
@@ -1582,6 +1597,11 @@ public class AmbariManagementControllerImpl implements
           + ", cluster name, component name and host name should be"
           + " provided");
     }
+
+    if (request.getAdminState() != null) {
+      throw new IllegalArgumentException("Property adminState cannot be modified through update. Use service " +
+          "specific DECOMMISSION action to decommision/recommission components.");
+    }
   }
 
   private String findServiceName(Cluster cluster, String componentName) throws AmbariException {
@@ -2019,14 +2039,11 @@ public class AmbariManagementControllerImpl implements
     }
 
     Map<String, Set<String>> clusterHostInfo = StageUtils.getClusterHostInfo(
-        clusters.getHostsForCluster(cluster.getClusterName()), cluster, hostsMap,
-        configs);
+        clusters.getHostsForCluster(cluster.getClusterName()), cluster);
 
     String clusterHostInfoJson = StageUtils.getGson().toJson(clusterHostInfo);
     Stage stage = createNewStage(cluster, actionManager.getNextRequestId(), requestContext, clusterHostInfoJson);
 
-    stage.setStageId(0);
-
     Map<String, String> params = new TreeMap<String, String>();
     // TODO : Update parameter population to be done only here
     params.put(JDK_LOCATION, this.jdkResourceUrl);
@@ -2034,7 +2051,7 @@ public class AmbariManagementControllerImpl implements
     params.putAll(getRcaParameters());
 
     if (actionRequest.isCommand()) {
-      customCommandExecutionHelper.addAction(actionRequest, stage, hostsMap, params);
+      customCommandExecutionHelper.addAction(actionRequest, stage, params);
     } else {
       actionExecutionHelper.addAction(actionExecContext, stage, configs, hostsMap, params);
     }
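
The GET-predicate handling added above normalizes a null component admin state to an empty string before comparing it with the requested value, so clients can explicitly filter for host components with no admin state recorded. A self-contained sketch of that comparison, using a stand-in enum rather than the real HostComponentAdminState:

    // Sketch of the null-safe adminState predicate match used above.
    public class AdminStateFilterSketch {
      enum AdminState { INSERVICE, DECOMMISSIONED } // stand-in enum

      static boolean matches(String requested, AdminState actual) {
        String stringToMatch = (actual == null) ? "" : actual.name();
        return requested.equals(stringToMatch);
      }

      public static void main(String[] args) {
        System.out.println(matches("DECOMMISSIONED", AdminState.DECOMMISSIONED)); // true
        System.out.println(matches("", null)); // true: empty filter selects unset state
      }
    }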

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
index b3776e7..b4d96ee 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
@@ -104,7 +104,7 @@ public class ControllerModule extends AbstractModule {
     bind(SecureRandom.class).in(Scopes.SINGLETON);
 
     bind(Clusters.class).to(ClustersImpl.class);
-    bind(AmbariCustomCommandExecutionHelper.class).to(AmbariCustomCommandExecutionHelperImpl.class);
+    bind(AmbariCustomCommandExecutionHelper.class);
     bind(ActionDBAccessor.class).to(ActionDBAccessorImpl.class);
     bind(CustomActionDBAccessor.class).to(CustomActionDBAccessorImpl.class);
     bindConstant().annotatedWith(Names.named("schedulerSleeptime")).to(10000L);
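
With the Impl class removed, AmbariCustomCommandExecutionHelper is now a concrete class and receives an untargeted Guice binding. A minimal sketch of the same pattern, with hypothetical class names standing in for the Ambari ones:

    import com.google.inject.AbstractModule;
    import com.google.inject.Guice;
    import com.google.inject.Singleton;

    @Singleton
    class Helper {} // stands in for the concrete AmbariCustomCommandExecutionHelper

    class HelperModule extends AbstractModule {
      @Override
      protected void configure() {
        bind(Helper.class); // untargeted binding: the concrete type is injected directly
      }
    }

    // Usage: Guice.createInjector(new HelperModule()).getInstance(Helper.class);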

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
index 13cbde9..6b673b0 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostRequest.java
@@ -35,6 +35,8 @@ public class ServiceComponentHostRequest {
   private String desiredStackId; // UPDATE
 
   private String staleConfig; // GET - predicate
+
+  private String adminState; // GET - predicate
   
   public ServiceComponentHostRequest(String clusterName,
                                      String serviceName,
@@ -147,6 +149,20 @@ public class ServiceComponentHostRequest {
     return this.staleConfig;
   }
 
+  /**
+   * @param adminState the adminState to use as predicate
+   */
+  public void setAdminState(String adminState) {
+    this.adminState = adminState;
+  }
+
+  /**
+   * @return the admin state of the component
+   */
+  public String getAdminState() {
+    return this.adminState;
+  }
+
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("{"
@@ -156,6 +172,8 @@ public class ServiceComponentHostRequest {
         + ", hostname=" + hostname
         + ", desiredState=" + desiredState
         + ", desiredStackId=" + desiredStackId
+        + ", staleConfig=" + staleConfig
+        + ", adminState=" + adminState
         + "}");
     return sb.toString();
   }
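
Since adminState is exposed as a GET predicate only (updates that set it are rejected by the controller check above), a client-side usage fragment might look like the following; the constructor arguments past serviceName are an assumption based on the fields shown in this class:

    // Hypothetical fragment: query DATANODE host components by admin state.
    ServiceComponentHostRequest request = new ServiceComponentHostRequest(
        "c1", "HDFS", "DATANODE", null, null);
    request.setAdminState("DECOMMISSIONED");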

http://git-wip-us.apache.org/repos/asf/ambari/blob/5d3677f7/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
index d746733..a776ac2 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/controller/ServiceComponentHostResponse.java
@@ -18,7 +18,7 @@
 
 package org.apache.ambari.server.controller;
 
-import org.apache.ambari.server.state.DesiredConfig;
+import org.apache.ambari.server.state.HostComponentAdminState;
 import org.apache.ambari.server.state.HostConfig;
 
 import java.util.Map;
@@ -26,32 +26,24 @@ import java.util.Map;
 public class ServiceComponentHostResponse {
 
   private String clusterName; // REF
-
   private String serviceName;
-
   private String componentName;
-
   private String hostname;
-
   // type -> desired config
   private Map<String, HostConfig> actualConfigs;
-
   private String liveState;
-
   private String stackVersion;
-
   private String desiredStackVersion;
-
   private String desiredState;
-  
   private boolean staleConfig = false;
+  private String adminState = null;
 
 
   public ServiceComponentHostResponse(String clusterName, String serviceName,
                                       String componentName, String hostname,
-                                      String liveState,
-                                      String stackVersion,
-                                      String desiredState, String desiredStackVersion) {
+                                      String liveState, String stackVersion,
+                                      String desiredState, String desiredStackVersion,
+                                      HostComponentAdminState adminState) {
     this.clusterName = clusterName;
     this.serviceName = serviceName;
     this.componentName = componentName;
@@ -60,6 +52,9 @@ public class ServiceComponentHostResponse {
     this.stackVersion = stackVersion;
     this.desiredState = desiredState;
     this.desiredStackVersion = desiredStackVersion;
+    if (adminState != null) {
+      this.adminState = adminState.name();
+    }
   }
 
   /**
@@ -174,10 +169,28 @@ public class ServiceComponentHostResponse {
     this.clusterName = clusterName;
   }
 
+  /**
+   * @return the admin state of the host component
+   */
+  public String getAdminState() {
+    return adminState;
+  }
+
+  /**
+   * @param adminState of the host component
+   */
+  public void setAdminState(String adminState) {
+    this.adminState = adminState;
+  }
+
   @Override
   public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
 
     ServiceComponentHostResponse that =
         (ServiceComponentHostResponse) o;
@@ -191,7 +204,7 @@ public class ServiceComponentHostResponse {
       return false;
     }
     if (componentName != null ?
-        !componentName.equals(that.componentName) : that.componentName != null){
+        !componentName.equals(that.componentName) : that.componentName != null) {
       return false;
     }
     if (hostname != null ?
@@ -206,19 +219,12 @@ public class ServiceComponentHostResponse {
   public int hashCode() {
     int result = clusterName != null ? clusterName.hashCode() : 0;
     result = 71 * result + (serviceName != null ? serviceName.hashCode() : 0);
-    result = 71 * result + (componentName != null ? componentName.hashCode():0);
+    result = 71 * result + (componentName != null ? componentName.hashCode() : 0);
     result = 71 * result + (hostname != null ? hostname.hashCode() : 0);
     return result;
   }
 
   /**
-   * @param configs the actual configs
-   */
-  public void setActualConfigs(Map<String, HostConfig> configs) {
-    actualConfigs = configs;
-  }
-  
-  /**
    * @return the actual configs
    */
   public Map<String, HostConfig> getActualConfigs() {
@@ -226,12 +232,19 @@ public class ServiceComponentHostResponse {
   }
 
   /**
+   * @param configs the actual configs
+   */
+  public void setActualConfigs(Map<String, HostConfig> configs) {
+    actualConfigs = configs;
+  }
+
+  /**
    * @return if the configs are stale
    */
   public boolean isStaleConfig() {
     return staleConfig;
   }
-  
+
   /**
    * @param stale
    */