Posted to commits@ambari.apache.org by ds...@apache.org on 2014/05/14 16:02:21 UTC

[13/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties b/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties
deleted file mode 100644
index c8939fc..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/files/task-log4j.properties
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use the following logger to send job summaries to a separate file, defined by
-# hadoop.mapreduce.jobsummary.log.file and rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
deleted file mode 100644
index 828b593..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_mode.rb
+++ /dev/null
@@ -1,65 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# to handle differences in how args are passed in
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_mode, :type => :rvalue) do |args|
-  
-    dir = args[0]
-
-    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
-    oozie_dir_mode = lookupvar("::hdp::params::oozie_hdfs_user_mode") 
-    
-    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
-    hcat_dir_mode = lookupvar("::hdp::params::hcat_hdfs_user_mode") 
-    
-    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
-    webhcat_dir_mode = lookupvar("::hdp::params::webhcat_hdfs_user_mode") 
-    
-    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
-    hive_dir_mode = lookupvar("::hdp::params::hive_hdfs_user_mode") 
-    
-    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
-    smoke_dir_mode = lookupvar("::hdp::params::smoke_hdfs_user_mode") 
-    
-    modes = []
-    modes.push({:dir => oozie_dir, :mode => oozie_dir_mode})
-    modes.push({:dir => hcat_dir, :mode => hcat_dir_mode})
-    modes.push({:dir => webhcat_dir, :mode => webhcat_dir_mode})
-    modes.push({:dir => hive_dir, :mode => hive_dir_mode})
-    modes.push({:dir => smoke_dir, :mode => smoke_dir_mode})
-
-    modes_grouped = {}
-    modes.each do |item|
-      if modes_grouped[item[:dir]].nil?
-        modes_grouped[item[:dir]]=[]
-      end
-      modes_grouped[item[:dir]]=modes_grouped[item[:dir]] + [(item[:mode])]
-    end
-
-    modes_max = {}
-    
-    modes_grouped.each_key do |key|
-      modes_max[key] = modes_grouped[key].max
-    end
-
-    modes_max[dir]
-  end
-end
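
For reference, the grouping logic removed above deduplicates directories that several services share and keeps the maximum mode per directory. A minimal standalone Ruby sketch of that step, using made-up directories and modes in place of the lookupvar() results:

    modes = [
      {:dir => '/user/oozie',  :mode => '775'},
      {:dir => '/user/shared', :mode => '755'},
      {:dir => '/user/shared', :mode => '777'}   # same dir claimed by two services
    ]

    # Group the modes by directory, then keep the maximum per directory.
    modes_grouped = Hash.new { |h, k| h[k] = [] }
    modes.each { |item| modes_grouped[item[:dir]] << item[:mode] }

    modes_max = {}
    modes_grouped.each_key { |dir| modes_max[dir] = modes_grouped[dir].max }

    puts modes_max['/user/shared']   # => 777

Note that the modes are compared as strings, exactly as in the function above, which works out for equal-length octal triples.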

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb
deleted file mode 100644
index 719d1e6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_namenode_id.rb
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# to get the namenode service id in an HA setup
-
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_namenode_id, :type => :rvalue) do |args|
-    namenode_id = ""
-    if args.length > 1
-      # Get hdfs-site to lookup hostname properties
-      lookup_property = args[0]
-      siteName = args[1]
-      siteConfig = lookupvar("#{siteName}")
-      nn_ids_str = lookupvar("::hdp::params::dfs_ha_namenode_ids")
-      hostname = lookupvar("::hdp::params::hostname")
-      nn_ids = nn_ids_str.to_s.split(',')
-
-      if nn_ids.length > 1
-        nn_ids.each do |id|
-          lookup_key = lookup_property + "." + id.to_s.strip
-          property_val = siteConfig.fetch(lookup_key, "")
-          if property_val != "" and property_val.include? hostname
-            namenode_id = id
-          end
-        end
-      end
-    end
-    namenode_id.strip
-  end
-end
\ No newline at end of file
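
The lookup removed here resolves which HA namenode id (nn1, nn2, ...) belongs to the local host by matching the hostname against the per-id RPC address properties in hdfs-site. A self-contained Ruby sketch of the same walk, with hypothetical host and property values:

    site_config = {
      'dfs.namenode.rpc-address.mycluster.nn1' => 'c6401.ambari.apache.org:8020',
      'dfs.namenode.rpc-address.mycluster.nn2' => 'c6402.ambari.apache.org:8020'
    }
    lookup_property = 'dfs.namenode.rpc-address.mycluster'
    hostname = 'c6402.ambari.apache.org'
    nn_ids = 'nn1,nn2'.split(',')

    namenode_id = ''
    nn_ids.each do |id|
      lookup_key = lookup_property + '.' + id.strip
      property_val = site_config.fetch(lookup_key, '')
      namenode_id = id if property_val != '' and property_val.include? hostname
    end

    puts namenode_id.strip   # => nn2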

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
deleted file mode 100644
index 9ae36ef..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/lib/puppet/parser/functions/hdp_hadoop_get_owner.rb
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# to handle differences in how args are passed in
-module Puppet::Parser::Functions
-  newfunction(:hdp_hadoop_get_owner, :type => :rvalue) do |args|
-  
-    dir = args[0]
-    
-    oozie_dir = lookupvar("::hdp::params::oozie_hdfs_user_dir")
-    oozie_user = lookupvar("::hdp::params::oozie_user") 
-
-    hcat_dir = lookupvar("::hdp::params::hcat_hdfs_user_dir")
-    hcat_user = lookupvar("::hdp::params::hcat_user") 
-
-    webhcat_dir = lookupvar("::hdp::params::webhcat_hdfs_user_dir")
-    webhcat_user = lookupvar("::hdp::params::webhcat_user") 
-
-    hive_dir = lookupvar("::hdp::params::hive_hdfs_user_dir")
-    hive_user = lookupvar("::hdp::params::hive_user") 
-
-    smoke_dir = lookupvar("::hdp::params::smoke_hdfs_user_dir")
-    smoke_user = lookupvar("::hdp::params::smokeuser") 
-
-    dirs_to_owners = {}
-    dirs_to_owners[oozie_dir] = oozie_user
-    dirs_to_owners[hcat_dir] = hcat_user
-    dirs_to_owners[webhcat_dir] = webhcat_user
-    dirs_to_owners[hive_dir] = hive_user
-    dirs_to_owners[smoke_dir] = smoke_user
-
-    dirs_to_owners[dir]
-  end
-end
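
Unlike hdp_hadoop_get_mode above, which keeps the maximum when several services claim the same directory, this function fills a plain hash, so the last assignment wins on a collision. A tiny Ruby illustration (the shared directory here is hypothetical):

    dirs_to_owners = {}
    dirs_to_owners['/user/hcat'] = 'hcat'      # suppose hcat and webhcat shared a dir
    dirs_to_owners['/user/hcat'] = 'webhcat'   # the later assignment silently wins

    puts dirs_to_owners['/user/hcat']   # => webhcat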

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
deleted file mode 100644
index 97629a8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/client.pp
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::client(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::client'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'hadoop_client_ambari_qa_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/${smokeuser}.headless.keytab",
-        keytabfile => "${smokeuser}.headless.keytab",
-        owner => $smokeuser,
-        hostnameInPrincipals => 'no'
-      }
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
deleted file mode 100644
index 04a0d8e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/datanode.pp
+++ /dev/null
@@ -1,100 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::datanode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  $hdp::params::service_exists['hdp-hadoop::datanode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $dfs_data_dir = $hdp-hadoop::params::dfs_data_dir
-  
-    if (($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)){
-      $a_namenode_on_node = true
-    } else {
-      $a_namenode_on_node = false
-    }
-
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'datanode_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/dn.service.keytab",
-        keytabfile => 'dn.service.keytab',
-        owner => $hdp-hadoop::params::hdfs_user
-      }
-    }
-
-  
-    hdp-hadoop::datanode::create_data_dirs { $dfs_data_dir: 
-      service_state => $service_state
-    }
-
-    if ($a_namenode_on_node == true){
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-    
-    hdp-hadoop::service{ 'datanode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Datanode::Create_data_dirs<||> -> Hdp-hadoop::Service['datanode'] -> Anchor['hdp-hadoop::end'] 
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::datanode::create_data_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create_ignore_failure { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0750',
-    service_state => $service_state,
-    force => true
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp
deleted file mode 100644
index 9d044ff..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs.pp
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::glusterfs_client(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::glusterfs_client'] = true
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp
deleted file mode 100644
index 87c03c8..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/glusterfs_service_check.pp
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::glusterfs_service_check(
-  $service_state = $hdp::params::cluster_client_state
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::glusterfs'] = true
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
deleted file mode 100644
index d8099df..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/copyfromlocal.pp
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hadoop::hdfs::copyfromlocal(
-  $service_state,
-  $owner = unset,
-  $group = unset,
-  $recursive_chown = false,
-  $mode = undef,
-  $recursive_chmod = false,
-  $dest_dir = undef,
-  $kinit_if_needed = undef
-) 
-{
- 
-  if ($service_state == 'running') {
-    $copy_cmd = "fs -copyFromLocal ${name} ${dest_dir}"
-    if ($kinit_if_needed == undef) {
-      $unless_cmd = "hadoop fs -ls ${dest_dir} >/dev/null 2>&1"
-    } else {
-      $unless_cmd = "${kinit_if_needed} hadoop fs -ls ${dest_dir} >/dev/null 2>&1"
-    }
-    ## exec-hadoop does a kinit based on the user, but the unless check does not
-    hdp-hadoop::exec-hadoop { $copy_cmd:
-      command => $copy_cmd,
-      unless => $unless_cmd,
-      user => $owner
-    }
-    if ($owner == unset) {
-      $chown = ""
-    } else {
-      if ($group == unset) {
-        $chown = $owner
-      } else {
-        $chown = "${owner}:${group}"
-     } 
-    }  
- 
-    if ($chown != "") {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chown == true) {
-        $chown_cmd = "fs -chown -R ${chown} ${dest_dir}"
-      } else {
-        $chown_cmd = "fs -chown ${chown} ${dest_dir}"
-      }
-      hdp-hadoop::exec-hadoop {$chown_cmd :
-        command => $chown_cmd,
-        user => $owner
-      }
-      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chown_cmd]
-    }
-  
-    if ($mode != undef) {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chmod == true) {
-        $chmod_cmd = "fs -chmod -R ${mode} ${dest_dir}"
-      } else {
-        $chmod_cmd = "fs -chmod ${mode} ${dest_dir}"
-      }
-      hdp-hadoop::exec-hadoop {$chmod_cmd :
-        command => $chmod_cmd,
-        user => $owner
-      }
-      Hdp-hadoop::Exec-hadoop[$copy_cmd] -> Hdp-hadoop::Exec-hadoop[$chmod_cmd]
-    }
-  }       
-}
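
The owner/group handling above boils down to building a chown argument: empty when no owner is given, "owner" when only an owner is given, "owner:group" otherwise. A minimal Ruby sketch of that rule (the user and group names are illustrative):

    def chown_arg(owner, group)
      return '' if owner.nil?                  # plays the role of 'unset' above
      group.nil? ? owner : "#{owner}:#{group}"
    end

    chown_arg('ambari-qa', nil)        # => "ambari-qa"
    chown_arg('ambari-qa', 'hadoop')   # => "ambari-qa:hadoop"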

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
deleted file mode 100644
index 68ef792..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/decommission.pp
+++ /dev/null
@@ -1,49 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::hdfs::decommission(
-) inherits hdp-hadoop::params
-{
-  if hdp_is_empty($configuration['hdfs-site']['dfs.hosts.exclude']) {
-    hdp_fail("No path to the exclude file is set in the configuration!")
-  }
-
-  $kinit_path = $hdp::params::kinit_path_local
-  $keytab_path = $hdp::params::hdfs_user_keytab
-  $hdfs_user = $hdp::params::hdfs_user
-  $kinit_cmd = "su - ${hdfs_user} -c '${kinit_path} -kt ${keytab_path} ${hdfs_user}'"
-
-  if ($hdp::params::security_enabled == true) {
-    exec { 'kinit_before_decommission' :
-      command => $kinit_cmd,
-      path => ['/bin'],
-      before => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
-    }
-  }
-
-  hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
-
-  hdp::exec{"hadoop dfsadmin -refreshNodes":
-      command => "hadoop dfsadmin -refreshNodes",
-      user => $hdp::params::hdfs_user,
-      require => Hdp-Hadoop::Hdfs::Generate_Exclude_File['exclude_file']
-    }
-  
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
deleted file mode 100644
index f0852ae..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/directory.pp
+++ /dev/null
@@ -1,121 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: unset should be changed to undef; just to be consistent
-define hdp-hadoop::hdfs::directory(
-  $service_state = 'running',
-  $owner = unset,
-  $group = unset,
-  $recursive_chown = false,
-  $mode = undef,
-  $recursive_chmod = false
-) 
-{
-  $dir_exists = "hadoop fs -ls ${name} >/dev/null 2>&1"
-  $namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
-  # Short circuit the expensive dfs client checks if directory was already created
-  $stub_dir = $hdp-hadoop::params::namenode_dirs_created_stub_dir
-  $stub_filename = $hdp-hadoop::params::namenode_dirs_stub_filename
-  $dir_absent_in_stub = "grep -q '^${name}$' ${stub_dir}/${stub_filename} > /dev/null 2>&1; test $? -ne 0"
-  $record_dir_in_stub = "echo '${name}' >> ${stub_dir}/${stub_filename}"
-  $tries = 30
-  $try_sleep = 10
-
-  if ($hdp::params::dfs_ha_enabled == true) {
-     $namenode_id = $hdp-hadoop::params::namenode_id
-     if (hdp_is_empty($namenode_id) == false) {
-       $dfs_check_nn_status_cmd = "hdfs haadmin -getServiceState $namenode_id | grep active > /dev/null"
-     }
-   } else {
-     $dfs_check_nn_status_cmd = "true"
-   }
-
-  if ($service_state == 'running') {
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      $mkdir_cmd = "fs -mkdir -p ${name}"
-    } else {
-      $mkdir_cmd = "fs -mkdir ${name}"
-    }
-
-    hdp-hadoop::exec-hadoop { $mkdir_cmd:
-      command   => $mkdir_cmd,
-      unless    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $dir_exists && ! $namenode_safe_mode_off",
-      onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && ! $dir_exists",
-      try_sleep => $try_sleep,
-      tries     => $tries
-    }
-
-    hdp::exec { $record_dir_in_stub:
-      command => $record_dir_in_stub,
-      user => $hdp-hadoop::params::hdfs_user,
-      onlyif => $dir_absent_in_stub
-    }
-
-    Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-    Hdp::Exec[$record_dir_in_stub]
-
-    if ($owner == unset) {
-      $chown = ""
-    } else {
-      if ($group == unset) {
-        $chown = $owner
-      } else {
-        $chown = "${owner}:${group}"
-     } 
-    }  
- 
-    if ($chown != "") {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chown == true) {
-        $chown_cmd = "fs -chown -R ${chown} ${name}"
-      } else {
-        $chown_cmd = "fs -chown ${chown} ${name}"
-      }
-      hdp-hadoop::exec-hadoop {$chown_cmd :
-        command   => $chown_cmd,
-        onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $namenode_safe_mode_off && $dir_exists",
-        try_sleep => $try_sleep,
-        tries     => $tries
-      }
-      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-      Hdp-hadoop::Exec-hadoop[$chown_cmd] ->
-      Hdp::Exec[$record_dir_in_stub]
-    }
-  
-    if ($mode != undef) {
-      #TODO: see if there is a good 'unless test'
-      if ($recursive_chmod == true) {
-        $chmod_cmd = "fs -chmod -R ${mode} ${name}"
-      } else {
-        $chmod_cmd = "fs -chmod ${mode} ${name}"
-      }
-      hdp-hadoop::exec-hadoop { $chmod_cmd :
-        command   => $chmod_cmd,
-        onlyif    => "$dir_absent_in_stub && $dfs_check_nn_status_cmd && $namenode_safe_mode_off && $dir_exists",
-        try_sleep => $try_sleep,
-        tries     => $tries
-      }
-      Hdp-hadoop::Exec-hadoop[$mkdir_cmd] ->
-      Hdp-hadoop::Exec-hadoop[$chmod_cmd] ->
-      Hdp::Exec[$record_dir_in_stub]
-    }
-  }       
-}
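
The stub-file guards above make directory creation idempotent: a directory name is grepped for in a local stub file before any expensive dfs client call, and appended to it once created. A hedged Ruby emulation of the two shell one-liners, assuming a stub path for illustration:

    STUB_FILE = '/var/lib/hdfs/namenode_dirs_created'   # assumed; the real path comes from params

    def dir_absent_in_stub?(dir)
      !File.readlines(STUB_FILE, chomp: true).include?(dir)
    rescue Errno::ENOENT
      true   # no stub file yet, so nothing has been recorded as created
    end

    def record_dir_in_stub(dir)
      File.open(STUB_FILE, 'a') { |f| f.puts(dir) }
    end

The grep pattern '^${name}$' in the manifest matches a whole line, which the include? check on chomped lines reproduces here.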

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
deleted file mode 100644
index 5053e73..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/generate_exclude_file.pp
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-define hdp-hadoop::hdfs::generate_exclude_file()
-{
-  $exlude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
-  ## Generate the exclude file if $configuration['hdfs-exclude-file']['datanodes'] has a value,
-  ## or if the 'datanodes' key is present but its value is empty
-  if (hdp_is_empty($configuration) == false and
-    hdp_is_empty($configuration['hdfs-exclude-file']) == false) and
-    (hdp_is_empty($configuration['hdfs-exclude-file']['datanodes']) == false)
-    or has_key($configuration['hdfs-exclude-file'], 'datanodes') {
-    ## Create the file with the list of excluded hosts
-    $exlude_hosts_list = hdp_array_from_comma_list($configuration['hdfs-exclude-file']['datanodes'])
-    file { $exlude_file_path :
-      ensure => file,
-      content => template('hdp-hadoop/exclude_hosts_list.erb')
-    }
-  }
-}
-
-
-
-
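
The actual template, exclude_hosts_list.erb, is not part of this diff; its job is to write one excluded datanode per line. A hedged ERB sketch of what that rendering amounts to (the template body below is an assumption, not the real file; the variable keeps the manifest's spelling):

    require 'erb'

    # Hypothetical stand-in for hdp-hadoop/templates/exclude_hosts_list.erb.
    template = "<% exlude_hosts_list.each do |host| -%>\n<%= host %>\n<% end -%>\n"

    exlude_hosts_list = ['c6403.ambari.apache.org', 'c6404.ambari.apache.org']
    puts ERB.new(template, trim_mode: '-').result(binding)
    # c6403.ambari.apache.org
    # c6404.ambari.apache.org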

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
deleted file mode 100644
index 37d0eea..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/hdfs/service_check.pp
+++ /dev/null
@@ -1,170 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::hdfs::service_check()
-{
-  $unique = hdp_unique_id_and_date()
-  $dir = '/tmp'
-  $tmp_file = "${dir}/${unique}"
-
-  $safemode_command = "dfsadmin -safemode get | grep OFF"
-
-  $create_dir_cmd = "fs -mkdir ${dir} ; hadoop fs -chmod -R 777 ${dir}"
-  $test_dir_exists = "hadoop fs -test -e ${dir}" #TODO: may fix up the fact that the test needs an explicit hadoop while the command does not
-  $cleanup_cmd = "fs -rm ${tmp_file}"
-  #cleanup put below to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code is that of the second command
-  $create_file_cmd = "${cleanup_cmd}; hadoop fs -put /etc/passwd ${tmp_file}" #TODO: inconsistent that the second command needs hadoop
-  $test_cmd = "fs -test -e ${tmp_file}"
-
-  anchor { 'hdp-hadoop::hdfs::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::check_safemode':
-    command   => $safemode_command,
-    tries     => 20,
-    try_sleep => 15,
-    logoutput => true,
-    user      => $hdp::params::smokeuser,
-    require   => Anchor['hdp-hadoop::hdfs::service_check::begin']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_dir':
-    command   => $create_dir_cmd,
-    unless    => $test_dir_exists,
-    tries     => 3,
-    try_sleep => 5,
-    user      => $hdp::params::smokeuser,
-    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::check_safemode']
-  }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    user      => $hdp::params::smokeuser,
-    require   => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_dir'],
-    notify    => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test']
-  }
-
-
-  #TODO: put in after testing
-  # hdp-hadoop::exec-hadoop { 'hdfs::service_check::cleanup':
-  #   command     => $cleanup_cmd,
-  #   refreshonly => true,
-  #   require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::test'],
-  #   before      => Anchor['hdp-hadoop::hdfs::service_check::end']
-  # }
-
-  hdp-hadoop::exec-hadoop { 'hdfs::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    user      => $hdp::params::smokeuser,
-    require     => Hdp-hadoop::Exec-hadoop['hdfs::service_check::create_file'],
-    before      => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin']
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::journalnode_check:begin':}
-
-  if hdp_is_empty($hdp::params::journalnode_hosts) {
-    ##No journalnode hosts
-    Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin'] ->
-      Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:end']
-
-  } else {
-    ## Cluster has journalnode hosts, run test of journalnodes
-    $journalnode_hosts_comma_sep = hdp_comma_list_from_array($hdp::params::journalnode_hosts)
-    class { 'hdp-hadoop::journalnode::service_check':
-      journalnode_hosts => $journalnode_hosts_comma_sep,
-      require          => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:begin'],
-      before           => Anchor['hdp-hadoop::hdfs::service_check::journalnode_check:end']
-    }
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::journalnode_check:end':} ->
-    anchor { 'hdp-hadoop::hdfs::service_check::zkfc_check:begin':}
-
-  if hdp_is_empty($hdp::params::zkfc_hosts) {
-    ## No zkfc hosts
-    Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:begin'] ->
-      Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:end']
-  } else {
-    ## Cluster has zkfc hosts; run a test of the local zkfc daemon if the
-    ## current host is a namenode. A namenode without ZKFC installed is also
-    ## considered a misconfiguration.
-    if ($hdp::params::is_namenode_master) {
-      class { 'hdp-hadoop::zkfc::service_check':
-        require          => Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:begin'],
-        before           => Anchor['hdp-hadoop::hdfs::service_check::zkfc_check:end']
-      }
-    }
-  }
-
-  anchor { 'hdp-hadoop::hdfs::service_check::zkfc_check:end':} ->
-    anchor{ 'hdp-hadoop::hdfs::service_check::end':}
-
-}
-
-class hdp-hadoop::journalnode::service_check($journalnode_hosts)
-{
-  $journalnode_port = $hdp::params::journalnode_port
-  $smoke_test_user = $hdp::params::smokeuser
-  
-  $checkWebUIFileName = "checkWebUI.py"
-  $checkWebUIFilePath = "/tmp/$checkWebUIFileName"
-
-  $checkWebUICmd = "su - ${smoke_test_user} -c 'python $checkWebUIFilePath -m $journalnode_hosts -p $journalnode_port'"
-
-  file { $checkWebUIFilePath:
-    ensure => present,
-    source => "puppet:///modules/hdp-hadoop/$checkWebUIFileName",
-    mode => '0755'
-  }
-
-  exec { $checkWebUIFilePath:
-    command   => $checkWebUICmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-  anchor{"hdp-hadoop::smoketest::begin":} -> File[$checkWebUIFilePath] -> Exec[$checkWebUIFilePath] -> anchor{"hdp-hadoop::smoketest::end":}
-}
-
-class hdp-hadoop::zkfc::service_check() inherits hdp-hadoop::params
-{
-  $hdfs_user = $hdp::params::hdfs_user
-  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}"
-  $pid_file = "${pid_dir}/hadoop-${hdfs_user}-zkfc.pid"
-
-  # Check whether the pid file exists and, if it does, run 'ps <pid>',
-  # which returns 1 if the process is not running
-  $check_zkfc_process_cmd = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-
-  exec { $check_zkfc_process_cmd:
-    command   => $check_zkfc_process_cmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-
-  anchor{"hdp-hadoop::zkfc::service_check::begin":} -> Exec[$check_zkfc_process_cmd] ->
-    anchor{"hdp-hadoop::zkfc::service_check::end":}
-
-}
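
The zkfc liveness test above is the classic pid-file pattern: read the recorded pid and probe it. A Ruby emulation of the shell check, with an illustrative pid path:

    pid_file = '/var/run/hadoop/hdfs/hadoop-hdfs-zkfc.pid'   # illustrative path

    def zkfc_running?(pid_file)
      return false unless File.exist?(pid_file)
      Process.kill(0, File.read(pid_file).strip.to_i)   # signal 0 only probes existence, like `ps <pid>`
      true
    rescue Errno::ESRCH
      false   # stale pid file: no such process
    rescue Errno::EPERM
      true    # the process exists but belongs to another user
    end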

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
deleted file mode 100644
index 8389bd2..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/init.pp
+++ /dev/null
@@ -1,547 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# singleton for use with the <||> form, so that namenode, datanode, etc. can pass state to hdp-hadoop and still use include
-define hdp-hadoop::common(
-  $service_state
-)
-{
-  class { 'hdp-hadoop':
-    service_state => $service_state
-  }
-  anchor{'hdp-hadoop::common::begin':} -> Class['hdp-hadoop'] -> anchor{'hdp-hadoop::common::end':} 
-}
-
-class hdp-hadoop::initialize()
-{
-  if ($hdp::params::component_exists['hdp-hadoop'] != true) {
-    $hdp::params::component_exists['hdp-hadoop'] = true
-  }
-  hdp-hadoop::common { 'common':}
-  anchor{'hdp-hadoop::initialize::begin':} -> Hdp-hadoop::Common['common'] -> anchor{'hdp-hadoop::initialize::end':}
-
-  # Configs generation
-  debug('##Configs generation for hdp-hadoop')
-
-  if has_key($configuration, 'mapred-queue-acls') {
-    configgenerator::configfile{'mapred-queue-acls': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'mapred-queue-acls.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['mapred-queue-acls'],
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/mapred-queue-acls.xml":
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  }
-  
-  if has_key($configuration, 'hadoop-policy') {
-    configgenerator::configfile{'hadoop-policy': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'hadoop-policy.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['hadoop-policy'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/hadoop-policy.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'core-site') {
-      configgenerator::configfile{'core-site': 
-        modulespath => $hdp-hadoop::params::conf_dir,
-        filename => 'core-site.xml',
-        module => 'hdp-hadoop',
-        configuration => $configuration['core-site'],
-        owner => $hdp-hadoop::params::hdfs_user,
-        group => $hdp::params::user_group
-      }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/core-site.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'mapred-site') {
-    configgenerator::configfile{'mapred-site': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'mapred-site.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['mapred-site'],
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/mapred-site.xml":
-      owner => $hdp-hadoop::params::mapred_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  $task_log4j_properties_location = "${hdp-hadoop::params::conf_dir}/task-log4j.properties"
-  
-  file { $task_log4j_properties_location:
-    owner   => $hdp-hadoop::params::mapred_user,
-    group   => $hdp::params::user_group,
-    mode    => '0644',
-    ensure  => present,
-    source  => "puppet:///modules/hdp-hadoop/task-log4j.properties",
-    replace => false
-  }
-
-  if has_key($configuration, 'capacity-scheduler') {
-    configgenerator::configfile{'capacity-scheduler':
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'capacity-scheduler.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['capacity-scheduler'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group,
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/capacity-scheduler.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } 
-
-
-  if has_key($configuration, 'hdfs-site') {
-    configgenerator::configfile{'hdfs-site': 
-      modulespath => $hdp-hadoop::params::conf_dir,
-      filename => 'hdfs-site.xml',
-      module => 'hdp-hadoop',
-      configuration => $configuration['hdfs-site'],
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hadoop::params::conf_dir}/hdfs-site.xml":
-      owner => $hdp-hadoop::params::hdfs_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hdfs-exclude-file') {
-    hdp-hadoop::hdfs::generate_exclude_file{'exclude_file':}
-  }
-
-  hdp::package {'ambari-log4j':
-    package_type  => 'ambari-log4j'
-  }
-
-  file { '/usr/lib/hadoop/lib/hadoop-tools.jar':
-    ensure => 'link',
-    target => '/usr/lib/hadoop/hadoop-tools.jar',
-    mode => '0755',
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/configuration.xsl":
-    owner => $hdp-hadoop::params::hdfs_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/fair-scheduler.xml":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/masters":
-    owner => $hdp-hadoop::params::hdfs_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/ssl-client.xml.example":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  file { "${hdp-hadoop::params::conf_dir}/ssl-server.xml.example":
-    owner => $hdp-hadoop::params::mapred_user,
-    group => $hdp::params::user_group
-  }
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-    if (hdp_is_empty($configuration) == false and hdp_is_empty($configuration['hdfs-site']) == false) {
-      if (hdp_is_empty($configuration['hdfs-site']['dfs.hosts.exclude']) == false) and
-         (hdp_is_empty($configuration['hdfs-exclude-file']) or
-          has_key($configuration['hdfs-exclude-file'], 'datanodes') == false) {
-        $exlude_file_path = $configuration['hdfs-site']['dfs.hosts.exclude']
-        file { $exlude_file_path :
-          ensure => present,
-          owner => $hdp-hadoop::params::hdfs_user,
-          group => $hdp::params::user_group
-        }
-      }
-      if (hdp_is_empty($hdp::params::slave_hosts) == false and hdp_is_empty($configuration['hdfs-site']['dfs.hosts']) == false) {
-        $include_file_path = $configuration['hdfs-site']['dfs.hosts']
-        $include_hosts_list = $hdp::params::slave_hosts
-        file { $include_file_path :
-          ensure => present,
-          owner => $hdp-hadoop::params::hdfs_user,
-          group => $hdp::params::user_group,
-          content => template('hdp-hadoop/include_hosts_list.erb')
-        }
-      }
-    }
-  }
-
-}
-
-class hdp-hadoop(
-  $service_state
-)
-{
-  include hdp-hadoop::params
-  $hadoop_config_dir = $hdp-hadoop::params::conf_dir
-  $mapred_user = $hdp-hadoop::params::mapred_user  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  $hadoop_tmp_dir = $hdp-hadoop::params::hadoop_tmp_dir
-
-  anchor{'hdp-hadoop::begin':} 
-  anchor{'hdp-hadoop::end':} 
-
-  if ($service_state=='uninstalled') {
-    hdp-hadoop::package { 'hadoop':
-      ensure => 'uninstalled'
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $::service_state,
-      force => true
-    }
-
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> -> Hdp::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp-hadoop::end']
-  } else {
-    
-    hdp-hadoop::package { 'hadoop':}
-
-    #Replace limits config file
-    hdp::configfile {"${hdp::params::limits_conf_dir}/hdfs.conf":
-      component => 'hadoop',
-      owner => 'root',
-      group => 'root',
-      require => Hdp-hadoop::Package['hadoop'],
-      before  => Anchor['hdp-hadoop::end'],
-      mode => '0644'
-    }
-
-    hdp::directory_recursive_create { $hadoop_config_dir:
-      service_state => $::service_state,
-      force => true,
-      owner => 'root',
-      group => 'root'
-    }
- 
-    hdp::user{ 'hdfs_user':
-      user_name => $hdfs_user,
-      groups => [$hdp::params::user_group]
-    }
-    
-    hdp::user { 'mapred_user':
-      user_name => $mapred_user,
-      groups => [$hdp::params::user_group]
-    }
-
-    $logdirprefix = $hdp-hadoop::params::hdfs_log_dir_prefix
-    hdp::directory_recursive_create { $logdirprefix: 
-        owner => 'root'
-    }
-    $piddirprefix = $hdp-hadoop::params::hadoop_pid_dir_prefix
-    hdp::directory_recursive_create { $piddirprefix: 
-        owner => 'root'
-    }
-
-    $dfs_domain_socket_path_dir = hdp_get_directory_from_filepath($hdp-hadoop::params::dfs_domain_socket_path)
-    hdp::directory_recursive_create { $dfs_domain_socket_path_dir:
-      owner => $hdfs_user,
-      group => $hdp::params::user_group,
-      mode  => '0644'
-    }
- 
-    #taskcontroller.cfg properties conditional on security
-    if ($hdp::params::security_enabled == true) {
-      file { "${hdp::params::hadoop_bin}/task-controller":
-        owner   => 'root',
-        group   => $hdp-hadoop::params::mapred_tt_group,
-        mode    => '6050',
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-      $tc_owner = 'root'
-      $tc_mode = '0644'
-    } else {
-      $tc_owner = $hdfs_user
-      $tc_mode = undef
-    }
-    hdp-hadoop::configfile { 'taskcontroller.cfg' :
-      tag   => 'common',
-      owner => $tc_owner,
-      mode  => $tc_mode
-    }
-
-    $template_files = [ 'hadoop-env.sh', 'commons-logging.properties', 'slaves']
-    hdp-hadoop::configfile { $template_files:
-      tag   => 'common', 
-      owner => $hdfs_user
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      hdp-hadoop::configfile { 'health_check' :
-        tag   => 'common',
-        owner => $hdfs_user,
-        template_tag => 'v2'
-      }
-    } else {
-      hdp-hadoop::configfile { 'health_check' :
-        tag   => 'common',
-        owner => $hdfs_user
-      }
-    }
-
-    # log4j.properties has to be installed just once, to avoid
-    # overwriting manual changes
-    if ($service_state=='installed_and_configured') {
-      hdp-hadoop::configfile { 'log4j.properties' :
-        tag   => 'common',
-        owner => $hdfs_user,
-      }
-    }
-
-    # update log4j.properties with data sent from the server
-    hdp-hadoop::update-log4j-properties { 'log4j.properties': }
-    
-    hdp-hadoop::configfile { 'hadoop-metrics2.properties' : 
-      tag   => 'common', 
-      owner => $hdfs_user,
-    }
-
-    # Copy database drivers for rca enablement
-    $server_db_name = $hdp::params::server_db_name
-    $hadoop_lib_home = $hdp::params::hadoop_lib_home
-    $db_driver_filename = $hdp::params::db_driver_file
-    $oracle_driver_url = $hdp::params::oracle_jdbc_url
-    $mysql_driver_url = $hdp::params::mysql_jdbc_url
-
-    if ($server_db_name == 'oracle' and $oracle_driver_url != "") {
-      $db_driver_dload_cmd = "curl -kf --retry 5 $oracle_driver_url -o ${hadoop_lib_home}/${db_driver_filename}"
-    } elsif ($server_db_name == 'mysql' and $mysql_driver_url != "") {
-      $db_driver_dload_cmd = "curl -kf --retry 5 $mysql_driver_url -o ${hadoop_lib_home}/${db_driver_filename}"
-    }
-    if ($db_driver_dload_cmd != undef) {
-      exec { "${db_driver_dload_cmd}":
-        command => $db_driver_dload_cmd,
-        unless  => "test -e ${hadoop_lib_home}/${db_driver_filename}",
-        creates => "${hadoop_lib_home}/${db_driver_filename}",
-        path    => ["/bin","/usr/bin/"],
-        require => Hdp-hadoop::Package['hadoop'],
-        before  => Anchor['hdp-hadoop::end']
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      hdp::directory_recursive_create { "$hadoop_tmp_dir":
-        service_state => $service_state,
-        force => true,
-        owner => $hdfs_user
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|>  ->
-      Hdp::Directory_recursive_create[$hadoop_config_dir] -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Hdp-hadoop::Update-log4j-properties['log4j.properties'] ->
-      Hdp::Directory_recursive_create[$logdirprefix] -> Hdp::Directory_recursive_create[$piddirprefix] -> Hdp::Directory_recursive_create["$hadoop_tmp_dir"] -> Anchor['hdp-hadoop::end']
-    } else {
-      Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Package<||> ->  Hdp::User<|title == $hdfs_user or title == $mapred_user|>  ->
-      Hdp::Directory_recursive_create[$hadoop_config_dir] -> Hdp-hadoop::Configfile<|tag == 'common'|> -> Hdp-hadoop::Update-log4j-properties['log4j.properties'] ->
-      Hdp::Directory_recursive_create[$logdirprefix] -> Hdp::Directory_recursive_create[$piddirprefix] -> Anchor['hdp-hadoop::end']
-    }
-
-  }
-}
-
-class hdp-hadoop::enable-ganglia()
-{
-  Hdp-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
-}
-
-###config file helper
-define hdp-hadoop::configfile(
-  $owner = undef,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir,
-  $mode = undef,
-  $namenode_host = undef,
-  $jtnode_host = undef,
-  $snamenode_host = undef,
-  $template_tag = undef,
-  $size = undef, #TODO: deprecate
-  $sizes = []
-) 
-{
-  #TODO: may need to be fixed 
-  if ($jtnode_host == undef) {
-    $calc_jtnode_host = $namenode_host
-  } else {
-    $calc_jtnode_host = $jtnode_host 
-  }
- 
-  # only set 32 if there is a 32-bit component and no 64-bit components
-  if (64 in $sizes) {
-    $common_size = 64
-  } elsif (32 in $sizes) {
-    $common_size = 32
-  } else {
-    $common_size = 64
-  }
-  
-  hdp::configfile { "${hadoop_conf_dir}/${name}":
-    component      => 'hadoop',
-    owner          => $owner,
-    mode           => $mode,
-    namenode_host  => $namenode_host,
-    snamenode_host => $snamenode_host,
-    jtnode_host    => $calc_jtnode_host,
-    template_tag   => $template_tag,
-    size           => $common_size
-  }
-}
-
-#####
-define hdp-hadoop::exec-hadoop(
-  $command,
-  $unless = undef,
-  $refreshonly = undef,
-  $echo_yes = false,
-  $kinit_override = false,
-  $tries = 1,
-  $timeout = 900,
-  $try_sleep = undef,
-  $user = undef,
-  $logoutput = undef,
-  $onlyif = undef,
-  $path = undef
-)
-{
-  include hdp-hadoop::params
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp-hadoop::params::conf_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  $hbase_user = $hdp-hadoop::params::hbase_user
-
-  if ($user == undef) {
-    $run_user = $hdfs_user
-  } else {
-    $run_user = $user
-  }
-
-  if (($security_enabled == true) and ($kinit_override == false)) {
-    if ($run_user in [$hdfs_user,'root']) {
-      $keytab = $hdp::params::hdfs_user_keytab
-      $principal = $hdfs_user
-    } elsif ($run_user in [$hbase_user]) {
-      $keytab = $hdp::params::hbase_user_keytab
-      $principal = $hbase_user
-    } else {
-      $keytab = $hdp::params::smokeuser_keytab
-      $principal = $hdp::params::smokeuser
-    }
-    $kinit_if_needed = "su - ${run_user} -c '${hdp::params::kinit_path_local} -kt ${keytab} ${principal}'"
-  } else {
-    $kinit_if_needed = ""
-  }
-  
-  if ($path == undef) {
-    if ($echo_yes == true) {
-      $cmd = "yes Y | hadoop --config ${conf_dir} ${command}"
-    } else {
-      $cmd = "hadoop --config ${conf_dir} ${command}"
-    }
-  } else {
-    $cmd = "${path} ${command}"
-  }
-  
-  if ($kinit_if_needed != "") {
-    exec { "kinit_before_${cmd}":
-      command => $kinit_if_needed,
-      path => ['/bin'],
-      before => Hdp::Exec[$cmd]
-    }
-  }
-
-  hdp::exec { $cmd:
-    command     => $cmd,
-    user        => $run_user,
-    unless      => $unless,
-    refreshonly => $refreshonly,
-    tries       => $tries,
-    timeout     => $timeout,
-    try_sleep   => $try_sleep,
-    logoutput   => $logoutput,
-    onlyif      => $onlyif,
-  }
-}
-
-#####
-define hdp-hadoop::update-log4j-properties(
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-)
-{
-  $properties = [
-    { name => 'ambari.jobhistory.database', value => $hdp-hadoop::params::ambari_db_rca_url },
-    { name => 'ambari.jobhistory.driver', value => $hdp-hadoop::params::ambari_db_rca_driver },
-    { name => 'ambari.jobhistory.user', value => $hdp-hadoop::params::ambari_db_rca_username },
-    { name => 'ambari.jobhistory.password', value => $hdp-hadoop::params::ambari_db_rca_password },
-    { name => 'ambari.jobhistory.logger', value => 'DEBUG,JHA' },
-
-    { name => 'log4j.appender.JHA', value => 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender' },
-    { name => 'log4j.appender.JHA.database', value => '${ambari.jobhistory.database}' },
-    { name => 'log4j.appender.JHA.driver', value => '${ambari.jobhistory.driver}' },
-    { name => 'log4j.appender.JHA.user', value => '${ambari.jobhistory.user}' },
-    { name => 'log4j.appender.JHA.password', value => '${ambari.jobhistory.password}' },
-
-    { name => 'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger', value => '${ambari.jobhistory.logger}' },
-    { name => 'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger', value => 'true' }
-  ]
-  hdp-hadoop::update-log4j-property { $properties :
-    log4j_file      => $name,
-    hadoop_conf_dir => $hadoop_conf_dir
-  }
-}
-
-#####
-define hdp-hadoop::update-log4j-property(
-  $log4j_file,
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-)
-{
-  hdp::exec{ "sed -i 's~\\(${hdp-hadoop::params::rca_disabled_prefix}\\)\\?${name[name]}=.*~${hdp-hadoop::params::rca_prefix}${name[name]}=${name[value]}~' ${hadoop_conf_dir}/${log4j_file}":
-    command => "sed -i 's~\\(${hdp-hadoop::params::rca_disabled_prefix}\\)\\?${name[name]}=.*~${hdp-hadoop::params::rca_prefix}${name[name]}=${name[value]}~' ${hadoop_conf_dir}/${log4j_file}"
-  }
-}
\ No newline at end of file
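
For context: each update-log4j-property resource above expands to one in-place sed edit. A minimal sketch of a single expansion, assuming purely for illustration that rca_disabled_prefix is a comment marker like '###' and rca_prefix is empty (the real values come from hdp-hadoop::params):

    # Illustrative only; prefix values and conf path are assumptions.
    hdp::exec { 'update ambari.jobhistory.logger':
      command => "sed -i 's~\\(###\\)\\?ambari.jobhistory.logger=.*~ambari.jobhistory.logger=DEBUG,JHA~' /etc/hadoop/conf/log4j.properties"
    }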

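For context, a minimal sketch of what the exec-hadoop define above produces on a secure cluster; the resource title, target directory, keytab path, and conf dir below are assumptions for illustration:

    # Illustrative only (names and paths assumed):
    hdp-hadoop::exec-hadoop { 'example::mkdir':
      command => 'fs -mkdir /tmp/example',
      user    => 'hdfs'
    }
    # expands roughly to two chained execs:
    #   su - hdfs -c '/usr/bin/kinit -kt /etc/security/keytabs/hdfs.headless.keytab hdfs'
    #   hadoop --config /etc/hadoop/conf fs -mkdir /tmp/example    (run as hdfs)
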
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
deleted file mode 100644
index 23503da..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker.pp
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::jobtracker(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::jobtracker'] = true
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $mapred_user = $hdp-hadoop::params::mapred_user
-    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'jobtracker_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/jt.service.keytab",
-        keytabfile => 'jt.service.keytab',
-        owner => $hdp-hadoop::params::mapred_user
-      }
-    }
-     
-    hdp-hadoop::jobtracker::create_local_dirs { $mapred_local_dir: 
-      service_state => $service_state
-    }
-
-    #TODO: cleanup 
-    Hdp-Hadoop::Configfile<||>{jtnode_host => $hdp::params::host_address}
-
-    #TODO: do we keep precondition here?
-    if ($service_state == 'running' and $hdp-hadoop::params::use_preconditions == true) {
-      class { 'hdp-hadoop::hdfs::service_check':
-        before => Hdp-hadoop::Service['jobtracker'],
-        require => Class['hdp-hadoop']
-      }
-    }
-
-    hdp-hadoop::service{ 'jobtracker':
-      ensure       => $service_state,
-      user         => $mapred_user
-    }
-  
-    hdp-hadoop::service{ 'historyserver':
-      ensure         => $service_state,
-      user           => $mapred_user,
-      create_pid_dir => false,
-      create_log_dir => false
-    }
-
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Service['jobtracker'] -> Hdp-hadoop::Service['historyserver'] 
-    -> Anchor['hdp-hadoop::end']
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Jobtracker::Create_local_dirs<||> -> Hdp-hadoop::Service['jobtracker'] 
-    -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::jobtracker::create_local_dirs($service_state)
-{
-    $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create { $dirs :
-      owner => $hdp-hadoop::params::mapred_user,
-      mode => '0755',
-      service_state => $service_state,
-      force => true
-    }
-}

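A note on create_local_dirs above: its resource title is a comma-separated list that hdp_array_from_comma_list splits into one directory per element. A minimal sketch with invented paths:

    # Illustrative only (paths assumed):
    hdp-hadoop::jobtracker::create_local_dirs { '/grid/0/mapred/local,/grid/1/mapred/local':
      service_state => 'running'
    }
    # => hdp::directory_recursive_create for each path, owned by the
    #    mapred user with mode 0755
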
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
deleted file mode 100644
index af5e095..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/jobtracker/service_check.pp
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::jobtracker::service_check()
-{
-  hdp-hadoop::exec-hadoop { 'jobtracker::service_check':
-    command   => 'job -list',
-    tries     => 3,
-    try_sleep => 5,
-    user => $hdp::params::smokeuser
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp
deleted file mode 100644
index f45c684..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/journalnode.pp
+++ /dev/null
@@ -1,60 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::journalnode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  $hdp::params::service_exists['hdp-hadoop::journalnode'] = true
-  
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  $jn_edits_dir = $hdp-hadoop::params::jn_edits_dir
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-    
-    hdp::directory_recursive_create { $jn_edits_dir:
-      service_state => $service_state,
-      force         => true,
-      owner         => $hdfs_user
-    }
-      
-    hdp-hadoop::service{ 'journalnode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => true,
-      create_log_dir => true
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp::Directory_recursive_create[$jn_edits_dir] -> Hdp-hadoop::Service['journalnode'] -> Anchor['hdp-hadoop::end'] 
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

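The begin/end anchor containment seen above is the module's standard ordering idiom; a generic, self-contained sketch with invented names:

    # Illustrative only: anchors pin the chain of resources inside the class.
    anchor { 'example::begin': } ->
    file { '/var/lib/example': ensure => directory } ->
    service { 'example': ensure => running } ->
    anchor { 'example::end': }
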
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
deleted file mode 100644
index df4ba7b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/mapred/service_check.pp
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::mapred::service_check() 
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $jar_location = $hdp::params::hadoop_jar_location
-  $input_file = 'mapredsmokeinput'
-  $output_file = "mapredsmokeoutput"
-
-  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
-  #cleanup put below to handle retries; if retrying, there will be a stale file that needs cleanup; exit code is a function of the second command
-  $create_file_cmd = "$cleanup_cmd ; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that second command needs hadoop
-  $test_cmd = "fs -test -e ${output_file}" 
-  $run_wordcount_job = "jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}"
-  
-  anchor { 'hdp-hadoop::mapred::service_check::begin':}
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::create_file':
-    command   => $create_file_cmd,
-    tries     => 1,
-    try_sleep => 5,
-    require   => Anchor['hdp-hadoop::mapred::service_check::begin'],
-  #  notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
-    user      => $smoke_test_user
-  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
-    command   => $run_wordcount_job,
-    tries     => 1,
-    try_sleep => 5,
-    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
-    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
-    user      => $smoke_test_user,
-    logoutput => "true"
-  }
-
-#  exec { 'runjob':
-#    command   => "hadoop jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}",
-#    tries     => 1,
-#    try_sleep => 5,
-#    require   => Hdp-hadoop::Exec-hadoop['mapred::service_check::create_file'],
-#    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-#    notify    => Hdp-hadoop::Exec-hadoop['mapred::service_check::test'],
-#    logoutput => "true",
-#    user      => $smoke_test_user
-#  }
-
-  hdp-hadoop::exec-hadoop { 'mapred::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Hdp-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
-    before      => Anchor['hdp-hadoop::mapred::service_check::end'], #TODO: remove after testing
-    user        => $smoke_test_user
-  }
-  
-  anchor{ 'hdp-hadoop::mapred::service_check::end':}
-}

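The smoke test above pairs notify with refreshonly so the verification step runs only when the wordcount job step actually executed. A self-contained sketch with invented commands:

    # Illustrative only (commands and paths assumed):
    exec { 'run-smoke-job':
      command => '/bin/true',
      path    => '/bin:/usr/bin',
      notify  => Exec['verify-smoke-output'],
    }
    exec { 'verify-smoke-output':
      command     => 'test -e /tmp/mapredsmokeoutput',
      path        => '/bin:/usr/bin',
      refreshonly => true,  # skipped unless notified by the job step
    }
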
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
deleted file mode 100644
index d0fc226..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
+++ /dev/null
@@ -1,285 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode(
-  $service_state = $hdp::params::cluster_service_state,
-  $slave_hosts = [],
-  $format = true,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::namenode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and 
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'namenode_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/nn.service.keytab",
-        keytabfile => 'nn.service.keytab',
-        owner => $hdp-hadoop::params::hdfs_user
-      }
-      hdp::download_keytab { 'namenode_hdfs_headless_keytab' :   
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/hdfs.headless.keytab",
-        keytabfile => 'hdfs.headless.keytab', 
-        owner => $hdp-hadoop::params::hdfs_user, 
-        hostnameInPrincipals => 'no'
-      }
-      hdp::download_keytab { 'namenode_spnego_keytab' :   
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/spnego.service.keytab",
-        keytabfile => 'spnego.service.keytab', 
-        owner => $hdp-hadoop::params::hdfs_user, 
-        mode => '0440',
-        group => $hdp::params::user_group
-      }
-    }
-
-    hdp-hadoop::namenode::create_name_dirs { $dfs_name_dir: 
-      service_state => $service_state
-    }
-   
-    Hdp-Hadoop::Configfile<||>{namenode_host => $hdp::params::host_address}
-    Hdp::Configfile<||>{namenode_host => $hdp::params::host_address} #for components other than hadoop (e.g., hbase) 
-  
-    if ($service_state == 'running' and $format == true) {
-      class {'hdp-hadoop::namenode::format' : }
-    }
-
-    hdp-hadoop::service{ 'namenode':
-      ensure       => $service_state,
-      user         => $hdp-hadoop::params::hdfs_user,
-      initial_wait => hdp_option_value($opts,'wait')
-    }
-
-    hdp-hadoop::namenode::create_app_directories { 'create_app_directories' :
-      service_state => $service_state
-    }
-
-    hdp-hadoop::namenode::create_user_directories { 'create_user_directories' :
-      service_state => $service_state
-    }
-
-    Anchor['hdp-hadoop::begin'] ->
-    Hdp-hadoop::Namenode::Create_name_dirs<||> ->
-    Hdp-hadoop::Service['namenode'] ->
-    Hdp-hadoop::Namenode::Create_app_directories<||> ->
-    Hdp-hadoop::Namenode::Create_user_directories<||> ->
-    Anchor['hdp-hadoop::end']
-
-    if ($service_state == 'running' and $format == true) {
-      Anchor['hdp-hadoop::begin'] ->
-      Hdp-hadoop::Namenode::Create_name_dirs<||> ->
-      Class['hdp-hadoop::namenode::format'] ->
-      Hdp-hadoop::Service['namenode'] ->
-      Anchor['hdp-hadoop::end']
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::namenode::create_name_dirs($service_state)
-{
-  $dirs = hdp_array_from_comma_list($name)
-  hdp::directory_recursive_create { $dirs :
-    owner => $hdp-hadoop::params::hdfs_user,
-    mode => '0755',
-    service_state => $service_state,
-    force => true
-  }
-}
-
-define hdp-hadoop::namenode::create_app_directories($service_state)
-{
-
-  if ($service_state == 'running') {
-   
-    hdp-hadoop::hdfs::directory{ "/tmp" :
-      service_state => $service_state,
-      owner => $hdp-hadoop::params::hdfs_user,
-      mode => '777'
-    }
-
-    hdp-hadoop::hdfs::directory{ '/mapred' :
-      service_state => $service_state,
-      owner         => $hdp-hadoop::params::mapred_user
-    }
-
-    hdp-hadoop::hdfs::directory{ '/mapred/system' :
-      service_state => $service_state,
-      owner         => $hdp-hadoop::params::mapred_user
-    }
-
-    Hdp-hadoop::Hdfs::Directory['/mapred'] -> Hdp-hadoop::Hdfs::Directory['/mapred/system']
-
-    if ($hdp::params::hbase_master_hosts != "") {
-
-      hdp-hadoop::hdfs::directory { $hdp-hadoop::params::hdfs_root_dir:
-        owner         => $hdp::params::hbase_user,
-        service_state => $service_state
-      }
-
-      $hbase_staging_dir = $hdp::params::hbase_staging_dir
-      hdp-hadoop::hdfs::directory { $hbase_staging_dir:
-        owner         => $hdp::params::hbase_user,
-        service_state => $service_state,
-        mode          => '711'
-      }
-    }
-
-    if ($hdp::params::hive_server_host != "") {
-      $hive_user = $hdp::params::hive_user
-      $hive_apps_whs_dir = $hdp::params::hive_apps_whs_dir
-
-      hdp-hadoop::hdfs::directory { $hive_apps_whs_dir:
-        service_state   => $service_state,
-        owner           => $hive_user,
-        mode            => '777',
-        recursive_chmod => true
-      }
-    }
-
-    if ($hdp::params::webhcat_server_host != "") {
-      $webhcat_user = $hdp::params::webhcat_user
-      $webhcat_apps_dir = hdp_get_directory_from_filepath(hdp_get_dir_from_url(hdp_default("webhcat-site/templeton.streaming.jar",""), "/apps/webhcat"))
-
-      hdp-hadoop::hdfs::directory{ $webhcat_apps_dir:
-        service_state => $service_state,
-        owner => $webhcat_user,
-        mode  => '755',
-        recursive_chmod => true
-      }
-    }
-
-    if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2) {
-      if ($hdp::params::nm_hosts != "") {
-        if ($hdp::params::yarn_log_aggregation_enabled == "true") {
-          $yarn_user = $hdp::params::yarn_user
-          $yarn_nm_app_log_dir = $hdp::params::yarn_nm_app_log_dir
-
-          hdp-hadoop::hdfs::directory{ $yarn_nm_app_log_dir:
-            service_state => $service_state,
-            owner => $yarn_user,
-            group => $hdp::params::user_group,
-            mode  => '1777',
-            recursive_chmod => true
-          }
-        }
-      }
-
-
-      if ($hdp::params::hs_host != "") {
-        $mapred_user = $hdp::params::mapred_user
-        $mapreduce_jobhistory_intermediate_done_dir = $hdp::params::mapreduce_jobhistory_intermediate_done_dir
-        $group = $hdp::params::user_group
-        $mapreduce_jobhistory_done_dir = $hdp::params::mapreduce_jobhistory_done_dir
-
-        hdp-hadoop::hdfs::directory{ $mapreduce_jobhistory_intermediate_done_dir:
-          service_state => $service_state,
-          owner => $mapred_user,
-          group => $group,
-          mode  => '1777'
-        }
-
-        hdp-hadoop::hdfs::directory{ $mapreduce_jobhistory_done_dir:
-          service_state => $service_state,
-          owner => $mapred_user,
-          group => $group,
-          mode  => '1777'
-        }
-      }
-    }
-  }
-}
-
-
-define hdp-hadoop::namenode::create_user_directories($service_state)
-{
-  if ($service_state == 'running') {
-    $smoke_hdfs_user_dir = $hdp::params::smoke_hdfs_user_dir
-
-    $smoke_user_dir_item="$smoke_hdfs_user_dir,"
-
-    if ($hdp::params::hive_server_host != "") {
-      $hive_hdfs_user_dir = $hdp::params::hive_hdfs_user_dir
-      $hive_dir_item="$hive_hdfs_user_dir,"
-    } else {
-      $hive_dir_item=""
-    }
-
-    if ($hdp::params::oozie_server != "") {
-      $oozie_hdfs_user_dir = $hdp::params::oozie_hdfs_user_dir
-      $oozie_dir_item="$oozie_hdfs_user_dir,"
-    } else {
-      $oozie_dir_item=""
-    }
-    
-    if ($hdp::params::webhcat_server_host != "") {
-      $hcat_hdfs_user_dir = $hdp::params::hcat_hdfs_user_dir
-      $webhcat_hdfs_user_dir = $hdp::params::webhcat_hdfs_user_dir
-      $webhcat_dir_item="$webhcat_hdfs_user_dir,"
-      if ($hcat_hdfs_user_dir != $webhcat_hdfs_user_dir) {
-        $hcat_dir_item="$hcat_hdfs_user_dir,"
-      } else {
-        $hcat_dir_item=""
-      }
-    } else {
-      $webhcat_dir_item=""
-    }
-
-    $users_dir_list_comm_sep = "$smoke_user_dir_item $hive_dir_item $oozie_dir_item $hcat_dir_item $webhcat_dir_item"
-
-    #Get the unique set of user directories
-    $users_dirs_set = hdp_set_from_comma_list($users_dir_list_comm_sep)
-
-    hdp-hadoop::namenode::create_user_directory{ $users_dirs_set:
-      service_state => $service_state
-    }
-  }
-  
-}
-
-define hdp-hadoop::namenode::create_user_directory($service_state)
-{
-  
-  $owner = hdp_hadoop_get_owner($name)
-  $mode = hdp_hadoop_get_mode($name)
-  debug("## Creating user directory: $name, owner: $owner, mode: $mode")
-  hdp-hadoop::hdfs::directory { $name:
-    service_state   => $service_state,
-    mode            => $mode,
-    owner           => $owner,
-    recursive_chmod => true
-  }
-}
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
deleted file mode 100644
index fb9d2ab..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/format.pp
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode::format(
-  $force = false
-)
-{
-  $mark_dir = $hdp-hadoop::params::namenode_formatted_mark_dir
-  $dfs_name_dir = $hdp-hadoop::params::dfs_name_dir
-  $hdfs_user = $hdp::params::hdfs_user
-  $hadoop_conf_dir = $hdp-hadoop::params::conf_dir
-
-  # Avoid formatting standby namenode in a HA cluster
-  if ($hdp::params::dfs_ha_enabled == false) {
-    if ($force == true) {
-      hdp-hadoop::exec-hadoop { 'namenode -format' :
-        command        => 'namenode -format',
-        kinit_override => true,
-        notify         => Hdp::Exec['set namenode mark']
-      }
-    } else {
-
-      file { '/tmp/checkForFormat.sh':
-        ensure => present,
-        source => 'puppet:///modules/hdp-hadoop/checkForFormat.sh',
-        mode   => '0755'
-      }
-
-      exec { '/tmp/checkForFormat.sh':
-        command   => "sh /tmp/checkForFormat.sh ${hdfs_user} ${hadoop_conf_dir} ${mark_dir} ${dfs_name_dir} ",
-        unless   => "test -d ${mark_dir}",
-        require   => File['/tmp/checkForFormat.sh'],
-        path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-        logoutput => "true",
-        notify   => Hdp::Exec['set namenode mark']
-      }
-    }
-
-    hdp::exec { 'set namenode mark' :
-      command     => "mkdir -p ${mark_dir}",
-      refreshonly => true
-    }
-  }
-}

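The format class above uses a run-once "mark directory" idiom: the guarded exec is skipped once the mark exists, and on its first successful run it notifies the exec that creates the mark. A self-contained sketch with invented commands and paths:

    # Illustrative only (commands and paths assumed):
    exec { 'one-time-setup':
      command => '/bin/true',
      unless  => 'test -d /var/run/example-setup-done',
      path    => '/bin:/usr/bin',
      notify  => Exec['set-example-mark'],
    }
    exec { 'set-example-mark':
      command     => 'mkdir -p /var/run/example-setup-done',
      path        => '/bin:/usr/bin',
      refreshonly => true,
    }
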
http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
deleted file mode 100644
index d4c0523..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode/service_check.pp
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::namenode::service_check()
-{
-  hdp-hadoop::exec-hadoop { 'namenode::service_check':
-    command   => 'dfs -ls /',
-    tries     => 3,
-    try_sleep => 5
-  }
-}