Posted to commits@ambari.apache.org by ds...@apache.org on 2014/05/14 16:02:20 UTC

[12/15] AMBARI-5747 Remove facter-1.6.10, Ruby, Puppet dependencies from pom.xml and src (dsen)

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
deleted file mode 100644
index 4beaafd..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/package.pp
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#singleton, but implemented as a define so collections can override its params
-define hdp-hadoop::package(
-  $ensure = 'present',
-  $include_32_bit = false,
-  $include_64_bit = false
-)
-{
-  #use 32-bit only if it is specifically requested and 64-bit is not
-  if ($include_32_bit == true) and ($include_64_bit != true) {
-    $size = 32
-  } else  {
-    $size = 64
-  }
-  $package = "hadoop ${size}"
-  $lzo_enabled = $hdp::params::lzo_enabled
-
-  hdp::package{ $package:
-    ensure       => $ensure,
-    package_type => 'hadoop',
-    size         => $size,
-    lzo_needed   => $lzo_enabled
-  }
-  anchor{ 'hdp-hadoop::package::helper::begin': } -> Hdp::Package[$package] -> anchor{ 'hdp-hadoop::package::helper::end': }
-}
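
For reference, a minimal usage sketch of the define above (the resource title
here is illustrative; the collector line mirrors what the component manifests
later in this commit actually do):

    hdp-hadoop::package { 'hadoop':
      ensure => 'present'
    }
    # a slave manifest can then flip the bitness via a resource collector:
    Hdp-hadoop::Package<||> { include_32_bit => true }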

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
deleted file mode 100644
index 5b9ebaa..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/params.pp
+++ /dev/null
@@ -1,222 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::params(
-) inherits hdp::params 
-{
-
-  ##TODO: for testing in masterless mode
-  $use_preconditions = false
-  ####  
-  $conf_dir = $hdp::params::hadoop_conf_dir 
-
-  ####hbase
-  $hdfs_root_dir = $hdp::params::hbase_hdfs_root_dir
-
-  ####### users
-
-  $mapred_user = $hdp::params::mapred_user
-  $hdfs_user = $hdp::params::hdfs_user
-  
-  ##### security related
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
- 
-  if ($hdp::params::security_enabled == true) {
-    $enable_security_authorization = true
-    $security_type = "kerberos"
-    $task_controller = "org.apache.hadoop.mapred.LinuxTaskController"
-    $dfs_datanode_address = 1019
-    $dfs_datanode_http_address = 1022
-  } else {
-    $enable_security_authorization = false
-    $security_type = "simple"
-    $task_controller = "org.apache.hadoop.mapred.DefaultTaskController"
-    $dfs_datanode_address = hdp_default("dfs_datanode_address","50010")
-    $dfs_datanode_http_address = hdp_default("dfs_datanode_http_address","50075")
-  }
-
-  ### hadoop-env
-  
-  $dtnode_heapsize = hdp_default("dtnode_heapsize","1024m")
-  $ttnode_heapsize = hdp_default("ttnode_heapsize","1024m")
-
-  $hadoop_heapsize = hdp_default("hadoop_heapsize","1024")
-
-  $hdfs_log_dir_prefix = hdp_default("hdfs_log_dir_prefix","/var/log/hadoop")
-
-  $hadoop_pid_dir_prefix = hdp_default("hadoop_pid_dir_prefix","/var/run/hadoop")
-  $run_dir = $hadoop_pid_dir_prefix
-
-  $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
-
-  $jtnode_heapsize = hdp_default("jtnode_heapsize","1024m")
-
-  $jtnode_opt_maxnewsize = hdp_default("jtnode_opt_maxnewsize","200m")
-
-  $jtnode_opt_newsize = hdp_default("jtnode_opt_newsize","200m")
-
-  $namenode_heapsize = hdp_default("namenode_heapsize","1024m")
-
-  $namenode_opt_maxnewsize = hdp_default("namenode_opt_maxnewsize","640m")
-
-  $namenode_opt_newsize = hdp_default("namenode_opt_newsize","640m")
-  
-  $hadoop_libexec_dir = hdp_default("hadoop_libexec_dir","/usr/lib/hadoop/libexec")
-  
-  $mapreduce_libs_path = hdp_default("mapreduce_libs_path","/usr/lib/hadoop-mapreduce/*")
-  
-  $mapred_log_dir_prefix = hdp_default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-  $mapred_pid_dir_prefix = hdp_default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-
-  # Cannot create a new dir in directory.pp; reuse the existing path
-  $namenode_dirs_created_stub_dir = "${hdfs_log_dir_prefix}/${hdp::params::hdfs_user}"
-  $namenode_dirs_stub_filename = "namenode_dirs_created"
-
-  ### The JSVC_HOME path is correct for AMD64 only, but can be changed through the API
-  if ($hdp::params::hdp_os_type == "suse") {
-    $jsvc_path = hdp_default("jsvc_path","/usr/lib/bigtop-utils")
-  } else {
-    $jsvc_path = hdp_default("jsvc_path","/usr/libexec/bigtop-utils")
-  }
-
-  ### compression related
-  if (($hdp::params::lzo_enabled == true) and ($hdp::params::snappy_enabled == true)) {
-    $mapred_compress_map_output = true
-    $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::snappy_enabled == true) {
-    $mapred_compress_map_output = true
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
-  } elsif ($hdp::params::lzo_enabled == true) {
-    $mapred_compress_map_output = true
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
-    $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
-  } else { 
-    $mapred_compress_map_output = false
-    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec"
-    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.DefaultCodec"
-  }
-
-  ### core-site
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $fs_checkpoint_dir = hdp_default("hdfs-site/dfs.namenode.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-  } else {
-    $fs_checkpoint_dir = hdp_default("core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-  }
-
-  $proxyuser_group = hdp_default("core-site/proxyuser.group","users")
-  
-  $hadoop_tmp_dir = hdp_default("core-site/hadoop.tmp.dir","/tmp/hadoop-$hdfs_user")
-  
-  $hadoop_ssl_enabled = hdp_default("core-site/hadoop.ssl.enabled","false")
-
-  ### hdfs-site
-  $datanode_du_reserved = hdp_default("hdfs-site/datanode.du.reserved",1073741824)
-
-  $dfs_block_local_path_access_user = hdp_default("hdfs-site/dfs.block.local.path.access.user","hbase")
-
-  $dfs_data_dir = $hdp::params::dfs_data_dir
-
-  $dfs_datanode_data_dir_perm = hdp_default("hdfs-site/dfs.datanode.data.dir.perm",750)
-
-  $dfs_datanode_failed_volume_tolerated = hdp_default("hdfs-site/dfs.datanode.failed.volume.tolerated",0)
-
-  $dfs_exclude = hdp_default("hdfs-site/dfs.exclude","dfs.exclude")
-
-  $dfs_include = hdp_default("hdfs-site/dfs.include","dfs.include")
-  
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $dfs_name_dir = hdp_default("hdfs-site/dfs.namenode.name.dir","/tmp/hadoop-hdfs/dfs/name")
-  } else {
-    $dfs_name_dir = hdp_default("hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
-  }
-  
-  $dfs_replication = hdp_default("hdfs-site/dfs.replication",3)
-
-  $dfs_support_append = hdp_default("hdfs-site/dfs.support.append",true)
-
-  $dfs_webhdfs_enabled = hdp_default("hdfs-site/dfs.webhdfs.enabled",false)
-  
-  $jn_edits_dir = hdp_default("hdfs-site/dfs.journalnode.edits.dir", "/grid/0/hdfs/journal")
-  
-  $dfs_domain_socket_path = hdp_default("hdfs-site/dfs.domain.socket.path","/var/lib/hadoop-hdfs/dn_socket")
-
- ######### mapred #######
-   ### mapred-site
-
-  $mapred_system_dir = '/mapred/system'
-
-  $mapred_child_java_opts_sz = hdp_default("mapred-site/mapred.child.java.opts.sz","-Xmx768m")
-
-  $mapred_cluster_map_mem_mb = hdp_default("mapred-site/mapred.cluster.map.mem.mb","-1")
-
-  $mapred_cluster_max_map_mem_mb = hdp_default("mapred-site/mapred.cluster.max.map.mem.mb","-1")
-
-  $mapred_cluster_max_red_mem_mb = hdp_default("mapred-site/mapred.cluster.max.red.mem.mb","-1")
-
-  $mapred_cluster_red_mem_mb = hdp_default("mapred-site/mapred.cluster.red.mem.mb","-1")
-
-  $mapred_job_map_mem_mb = hdp_default("mapred-site/mapred.job.map.mem.mb","-1")
-
-  $mapred_job_red_mem_mb = hdp_default("mapred-site/mapred.job.red.mem.mb","-1")
-
-  $mapred_jobstatus_dir = hdp_default("mapred-site/mapred.jobstatus.dir","file:////mapred/jobstatus")
-
-  if (hdp_get_major_stack_version($stack_version) >= 2) {
-    $mapred_local_dir = hdp_default("mapred-site/mapreduce.cluster.local.dir","/tmp/hadoop-mapred/mapred/local")
-  } else {
-    $mapred_local_dir = hdp_default("mapred-site/mapred.local.dir","/tmp/hadoop-mapred/mapred/local")
-  }
-
-  $mapred_tt_group = hdp_default("mapred-site/mapreduce.tasktracker.group", "hadoop")
-   
-  $mapreduce_userlog_retainhours = hdp_default("mapred-site/mapreduce.userlog.retainhours",24)
-
-  $maxtasks_per_job = hdp_default("mapred-site/maxtasks.per.job","-1")
-
-  $scheduler_name = hdp_default("mapred-site/scheduler.name","org.apache.hadoop.mapred.CapacityTaskScheduler")
-
-  #### health_check
-
-  $security_enabled = $hdp::params::security_enabled
-
-  $task_bin_exe = hdp_default("task_bin_exe")
-
-  $rca_enabled = hdp_default("rca_enabled", false)
-  $rca_disabled_prefix = "###"
-  if ($rca_enabled == true) {
-    $rca_prefix = ""
-  } else {
-    $rca_prefix = $rca_disabled_prefix
-  }
-  # $ambari_db_server_host = hdp_default("ambari_db_server_host", "localhost")
-  $ambari_db_rca_url = hdp_default("ambari_db_rca_url", "jdbc:postgresql://localhost/ambarirca")
-  $ambari_db_rca_driver = hdp_default("ambari_db_rca_driver", "org.postgresql.Driver")
-  $ambari_db_rca_username = hdp_default("ambari_db_rca_username", "mapred")
-  $ambari_db_rca_password = hdp_default("ambari_db_rca_password", "mapred")
-
-  if ($hdp::params::dfs_ha_enabled == true) {
-    $nameservice = $hdp::params::dfs_ha_nameservices
-    $namenode_id = hdp_hadoop_get_namenode_id("dfs.namenode.rpc-address.${nameservice}", "hdfs-site")
-  }
-
-}
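
A note on the hdp_default() convention used throughout this class: the first
argument is a lookup key, optionally qualified by the config file it belongs
to ("hdfs-site/dfs.replication"), and the second is the fallback when the key
is not configured. A sketch of the assumed semantics (the real function lived
in the module's Ruby lib, which is not shown in this hunk):

    # hdp_default("hdfs-site/dfs.replication", 3)
    #   => the configured value of dfs.replication from hdfs-site if present,
    #      otherwise the literal 3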

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
deleted file mode 100644
index 98def76..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/service.pp
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-define hdp-hadoop::service(
-  $ensure = 'running',
-  $user,
-  $initial_wait = undef,
-  $create_pid_dir = true,
-  $create_log_dir = true
-)
-{
-
-  $security_enabled = $hdp::params::security_enabled
-
-  #NOTE: does not work if the namenode and datanode are on the same host
-  $pid_dir = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${user}"
-  
-  $hadoop_libexec_dir = $hdp-hadoop::params::hadoop_libexec_dir
-  
-  if (($security_enabled == true) and ($name == 'datanode')) {
-    $run_as_root = true
-  } else {       
-    $run_as_root = false
-  }
-
-  if (($security_enabled == true) and ($name == 'datanode')) {
-    $hdfs_user = $hdp::params::hdfs_user
-    $pid_file = "${hdp-hadoop::params::hadoop_pid_dir_prefix}/${hdfs_user}/hadoop-${hdfs_user}-${name}.pid"
-  } else {
-    $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
-  } 
-
-  $log_dir = "${hdp-hadoop::params::hdfs_log_dir_prefix}/${user}"
-  $hadoop_daemon = "export HADOOP_LIBEXEC_DIR=${hadoop_libexec_dir} && ${hdp::params::hadoop_bin}/hadoop-daemon.sh"
-   
-  $cmd = "${hadoop_daemon} --config ${hdp-hadoop::params::conf_dir}"
-  if ($ensure == 'running') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "su - root -c  '${cmd} start ${name}'"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
-    }
-    # Check whether the pid file exists; if it does, run 'ps <pid>',
-    # which exits non-zero when the process is not running
-    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
-  } elsif ($ensure == 'stopped') {
-    if ($run_as_root == true) {
-      $daemon_cmd = "su - root -c  '${cmd} stop ${name}' && rm -f ${pid_file}"
-    } else {
-      $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}' && rm -f ${pid_file}"
-    }
-    $service_is_up = undef
-  } else {
-    $daemon_cmd = undef
-  }
- 
-  if ($create_pid_dir == true) {
-    hdp::directory_recursive_create { $pid_dir: 
-      owner       => $user,
-      context_tag => 'hadoop_service',
-      service_state => $::service_state,
-      force => true
-    }
-  }
-  
-  if ($create_log_dir == true) {
-    hdp::directory_recursive_create { $log_dir: 
-      owner       => $user,
-      context_tag => 'hadoop_service',
-      service_state => $::service_state,
-      force => true
-    }
-  }
-  if ($daemon_cmd != undef) {
-    if ($name == 'datanode' and $ensure == 'running') {
-      exec { 'delete_pid_before_datanode_start':
-        command  => "rm -f ${pid_file}",
-        unless       => $service_is_up,
-        path => $hdp::params::exec_path
-      }
-    }
-    hdp::exec { $daemon_cmd:
-      command      => $daemon_cmd,
-      unless       => $service_is_up,
-      initial_wait => $initial_wait
-    }
-  }
-
-  anchor{"hdp-hadoop::service::${name}::begin":}
-  anchor{"hdp-hadoop::service::${name}::end":}
-  if ($daemon_cmd != undef) {
-    Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Exec[$daemon_cmd] -> Anchor["hdp-hadoop::service::${name}::end"]
-
-    if ($create_pid_dir == true) {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$pid_dir] -> Hdp::Exec[$daemon_cmd] 
-    }
-     if ($create_log_dir == true) {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Hdp::Directory_recursive_create[$log_dir] -> Hdp::Exec[$daemon_cmd] 
-    }
-    if ($name == 'datanode' and $ensure == 'running') {
-      Anchor["hdp-hadoop::service::${name}::begin"] -> Exec['delete_pid_before_datanode_start'] -> Hdp::Exec[$daemon_cmd]
-    }
-  }
-  if ($ensure == 'running') {
-    #TODO: look at Puppet resource retry and retry_sleep
-    #TODO: can make sleep contingent on $name
-    $sleep = 5
-    $post_check = "sleep ${sleep}; ${service_is_up}"
-    hdp::exec { $post_check:
-      command => $post_check,
-      unless  => $service_is_up
-    }
-    Hdp::Exec[$daemon_cmd] -> Hdp::Exec[$post_check] -> Anchor["hdp-hadoop::service::${name}::end"]
-  }  
-}
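
Worth noting in the define above: $service_is_up doubles as the liveness
probe and the 'unless' guard, which is what makes the start exec idempotent.
Condensed, with the same strings as the deleted code:

    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
    hdp::exec { $daemon_cmd:
      command => $daemon_cmd,
      unless  => $service_is_up   # no-op when the daemon is already running
    }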

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
deleted file mode 100644
index f0338f9..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/jobtracker-conn.pp
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::slave::jobtracker-conn($jobtracker_host)
-{
-  Hdp-Hadoop::Configfile<||>{jtnode_host => $jobtracker_host}
-}
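
Hdp-Hadoop::Configfile<||> is a Puppet resource collector: it matches every
hdp-hadoop::configfile resource already declared in the catalog and overrides
the listed parameter on all of them. A sketch (the config file title is
illustrative, not from this commit):

    hdp-hadoop::configfile { 'mapred-site.xml': }                    # declared elsewhere
    Hdp-Hadoop::Configfile<||> { jtnode_host => $jobtracker_host }   # override injected here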

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
deleted file mode 100644
index 326f31d..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/master-conn.pp
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::slave::master-conn($master_host)
-{
-  Hdp-Hadoop::Configfile<||>{
-    namenode_host => $master_host,
-    jtnode_host   => $master_host
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
deleted file mode 100644
index 8047c05..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/slave/namenode-conn.pp
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#TODO: this might be replaced by just using hdp::namenode-conn
-class hdp-hadoop::slave::namenode-conn($namenode_host)
-{
-  #TODO: check if we can get rid of both
-  Hdp-Hadoop::Configfile<||>{namenode_host => $namenode_host}
-  Hdp::Configfile<||>{namenode_host => $namenode_host} #for components other than hadoop (e.g., hbase) 
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
deleted file mode 100644
index 296a0d4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/smoketest.pp
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::smoketest(
-  $opts={}
-)
-{
-  #TODO: put in a wait
-  #TODO: look for a better way to compute outname
-  $date_format = '"%M%d%y"'
-  $outname = inline_template("<%=  `date +${date_format}`.chomp %>")
-
-  #TODO: hardwired to run on the namenode and as the hdfs user
-
-  $put = "dfs -put /etc/passwd passwd-${outname}"
-  $exec = "jar /usr/share/hadoop/hadoop-examples-*.jar wordcount passwd-${outname} ${outname}.out"
-  $result = "fs -test -e ${outname}.out >/dev/null 2>&1"
-  anchor{ "hdp-hadoop::smoketest::begin" :} ->
-  hdp-hadoop::exec-hadoop{ $put:
-    command => $put
-  } ->
-  hdp-hadoop::exec-hadoop{ $exec:
-    command =>  $exec
-  } ->
-  hdp-hadoop::exec-hadoop{ $result:
-    command =>  $result
-  } ->
-  anchor{ "hdp-hadoop::smoketest::end" :}
-}
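
The inline -> arrows above run the put, the wordcount job, and the existence
check strictly in order between the two anchors. The equivalent chaining
statement over resource references would be (a sketch):

    Anchor['hdp-hadoop::smoketest::begin'] ->
      Hdp-hadoop::Exec-hadoop[$put] ->
      Hdp-hadoop::Exec-hadoop[$exec] ->
      Hdp-hadoop::Exec-hadoop[$result] ->
      Anchor['hdp-hadoop::smoketest::end']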

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
deleted file mode 100644
index f2c5beb..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/snamenode.pp
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::snamenode(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params  
-{
-  $hdp::params::service_exists['hdp-hadoop::snamenode'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
-    $fs_checkpoint_dir = $hdp-hadoop::params::fs_checkpoint_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      if ($hdp::params::service_exists['hdp-hadoop::namenode'] != true) {
-        $masterHost = $kerberos_adminclient_host[0]
-        hdp::download_keytab { 'snamenode_service_keytab' :
-          masterhost => $masterHost,
-          keytabdst => "${$keytab_path}/nn.service.keytab",
-          keytabfile => 'nn.service.keytab',
-          owner => $hdp-hadoop::params::hdfs_user
-        }
-        hdp::download_keytab { 'snamenode_spnego_keytab' :   
-          masterhost => $masterHost,
-          keytabdst => "${$keytab_path}/spnego.service.keytab",
-          keytabfile => 'spnego.service.keytab', 
-          owner => $hdp-hadoop::params::hdfs_user,
-          mode => '0440',
-          group => $hdp::params::user_group
-        }
-      }
-    }
- 
-    Hdp-Hadoop::Configfile<||>{snamenode_host => $hdp::params::host_address}
-  
-    hdp-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
-      service_state => $service_state
-    }
-    
-    if ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) {
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-    
-    hdp-hadoop::service{ 'secondarynamenode':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-  
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Namenode::Create_name_dirs<||> ->
-      Hdp-hadoop::Service['secondarynamenode'] -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::snamenode::create_name_dirs($service_state)
-{
-   $dirs = hdp_array_from_comma_list($name)
-   hdp::directory_recursive_create { $dirs :
-     owner => $hdp-hadoop::params::hdfs_user,
-     mode => '0755',
-     service_state => $service_state,
-     force => true
-  }
-}
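
create_name_dirs above relies on the old idiom of passing data through the
resource title: the comma-separated checkpoint dirs are the $name, split back
into an array by hdp_array_from_comma_list. A sketch of a call (the paths are
illustrative):

    hdp-hadoop::snamenode::create_name_dirs { '/hadoop/hdfs/namesecondary,/hadoop2/hdfs/namesecondary':
      service_state => 'running'
    }
    # inside the define: $dirs == ['/hadoop/hdfs/namesecondary', '/hadoop2/hdfs/namesecondary']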

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
deleted file mode 100644
index e6869d6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/tasktracker.pp
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::tasktracker(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params
-{
-  $hdp::params::service_exists['hdp-hadoop::tasktracker'] = true
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-
-  if ($hdp::params::use_32_bits_on_slaves == true) {
-    Hdp-hadoop::Package<||>{include_32_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 32}
-  } else {
-    Hdp-hadoop::Package<||>{include_64_bit => true}
-    Hdp-hadoop::Configfile<||>{sizes +> 64}
-  }
-
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-    $mapred_local_dir = $hdp-hadoop::params::mapred_local_dir
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-
-    if ( ($service_state == 'installed_and_configured') and
-         ($security_enabled == true) and ($kerberos_install_type == "AMBARI_SET_KERBEROS") ) {
-      $masterHost = $kerberos_adminclient_host[0]
-      hdp::download_keytab { 'tasktracker_service_keytab' :
-        masterhost => $masterHost,
-        keytabdst => "${$keytab_path}/tt.service.keytab",
-        keytabfile => 'tt.service.keytab',
-        owner => $hdp-hadoop::params::mapred_user
-      }
-    }
-
-    hdp-hadoop::tasktracker::create_local_dirs { $mapred_local_dir:
-      service_state => $service_state
-    }
-    
-    if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) {
-      $create_pid_dir = false
-      $create_log_dir = false
-    } else {
-      $create_pid_dir = true
-      $create_log_dir = true
-    }
-
-    hdp-hadoop::service{ 'tasktracker':
-      ensure => $service_state,
-      user   => $hdp-hadoop::params::mapred_user,
-      create_pid_dir => $create_pid_dir,
-      create_log_dir => $create_log_dir
-    }
-  
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Tasktracker::Create_local_dirs<||> -> Hdp-hadoop::Service['tasktracker'] ->
-    Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
-
-define hdp-hadoop::tasktracker::create_local_dirs($service_state)
-{
-  if ($hdp::params::service_exists['hdp-hadoop::jobtracker'] != true) {
-    $dirs = hdp_array_from_comma_list($name)
-    hdp::directory_recursive_create_ignore_failure { $dirs :
-      owner => $hdp-hadoop::params::mapred_user,
-      mode => '0755',
-      service_state => $service_state,
-      force => true
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp b/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp
deleted file mode 100644
index 5f74012..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/zkfc.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hadoop::zkfc(
-  $service_state = $hdp::params::cluster_service_state,
-  $opts = {}
-) inherits hdp-hadoop::params 
-{
-
-  Hdp-hadoop::Common<||>{service_state => $service_state}
-  Hdp-hadoop::Package<||>{include_64_bit => true}
-  Hdp-hadoop::Configfile<||>{sizes +> 64}
-  
-  $hdfs_user = $hdp-hadoop::params::hdfs_user
-  
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
-  
-    #adds package, users and directories, and common hadoop configs
-    include hdp-hadoop::initialize
-    
-    hdp-hadoop::service{ 'zkfc':
-      ensure         => $service_state,
-      user           => $hdp-hadoop::params::hdfs_user,
-      create_pid_dir => true,
-      create_log_dir => true
-    }
-    
-    #top level does not need anchors
-    Anchor['hdp-hadoop::begin'] -> Hdp-hadoop::Service['zkfc'] -> Anchor['hdp-hadoop::end']
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
deleted file mode 100644
index 77e458f..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/commons-logging.properties.erb
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
deleted file mode 100644
index 750549c..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/exclude_hosts_list.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<% exlude_hosts_list.each do |val| -%>
-<%= val%>
-<% end -%>
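
The template iterates a host array taken from the rendering scope; note that
the variable must be exposed under its original (misspelled) name,
exlude_hosts_list. A rendering sketch (class name and target path are
illustrative, not from this commit):

    class hdp-hadoop::hdfs::decommission($exlude_hosts_list = []) {
      file { '/etc/hadoop/conf/dfs.exclude':
        content => template('hdp-hadoop/exclude_hosts_list.erb')
      }
    }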

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
deleted file mode 100644
index ef0d3d4..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
+++ /dev/null
@@ -1,122 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME=<%=scope.function_hdp_template_var("::hdp::params::java64_home")%>
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if the env var is already set, that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
-
-<% if scope.function_hdp_template_var("::hdp::params::isHadoop2Stack") == true %>
-# Path to jsvc required by secure HDP 2.0 datanode
-export JSVC_HOME=<%=scope.function_hdp_template_var("jsvc_path")%>
-<% end %>
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("hadoop_heapsize")%>"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("jtnode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("jtnode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx<%=scope.function_hdp_template_var("jtnode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%>m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR=<%=scope.function_hdp_template_var("mapred_log_dir_prefix")%>/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$USER
-export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_pid_dir_prefix")%>/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR=<%=scope.function_hdp_template_var("mapred_pid_dir_prefix")%>/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS=<%=scope.function_hdp_template_var("mapreduce_libs_path")%>
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR=<%=scope.function_hdp_template_var("hadoop_libexec_dir")%>
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
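
Each <%= scope.function_hdp_template_var(...) %> above resolves a Puppet
variable at render time; the script itself would land on disk through an
ordinary file resource pointing at the template. A sketch (path, owner, and
mode are illustrative):

    file { '/etc/hadoop/conf/hadoop-env.sh':
      content => template('hdp-hadoop/hadoop-env.sh.erb'),
      owner   => $hdfs_user,
      mode    => '0755'
    }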

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
deleted file mode 100644
index 65d9767..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-<%if not scope.function_hdp_is_empty(ganglia_server_host)%>
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
-datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
-tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8664
-nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-historyserver.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8666
-journalnode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-<% end %>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
deleted file mode 100644
index 65d9767..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-<%if not scope.function_hdp_is_empty(ganglia_server_host)%>
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
-datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
-tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8664
-nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-historyserver.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8666
-journalnode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-<% end %>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb
deleted file mode 100644
index b2c3179..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hdfs.conf.erb
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-<%=scope.function_hdp_template_var("hdfs_user")%>   - nofile 32768
-<%=scope.function_hdp_template_var("hdfs_user")%>   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb
deleted file mode 100644
index cb7b12b..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check-v2.erb
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
deleted file mode 100644
index b84b336..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/health_check.erb
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
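
The health_check.erb variant above adds two probes on top of the disk and link checks: a taskcontroller permission check (6050:root:hadoop) on secure clusters, and a Jetty probe that queries the TaskTracker's JMX servlet for shuffle exceptions. A hedged Python sketch of the Jetty probe follows; the port constant stands in for the ::hdp::tasktracker_port value the template injected and is an assumed default:

# Sketch of the check_jetty probe above: query the TaskTracker JMX
# servlet and flag the node when the shuffle handler has caught more
# than 10 exceptions. Exit codes mirror the shell function.
import json
import socket
import sys
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2

TASKTRACKER_PORT = 50060  # assumed default; the ERB template injected the real port

def check_jetty(host=None, timeout=5):
    host = host or socket.gethostname()
    url = ('http://%s:%d/jmx?qry=Hadoop:service=TaskTracker,'
           'name=ShuffleServerMetrics' % (host, TASKTRACKER_PORT))
    try:
        beans = json.load(urlopen(url, timeout=timeout)).get('beans', [])
    except Exception:
        print('check jetty: ping failed')
        return 1
    exceptions = 0
    for bean in beans:
        exceptions = int(bean.get('shuffle_exceptions_caught', exceptions))
    if exceptions > 10:
        print('check jetty: shuffle_exceptions=%d' % exceptions)
        return 1
    print('jetty ok')
    return 0

if __name__ == '__main__':
    sys.exit(check_jetty())
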

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb
deleted file mode 100644
index 5b519c6..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/include_hosts_list.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<% include_hosts_list.each do |val| -%>
-<%= val %>
-<% end -%>
\ No newline at end of file
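
The template above is a plain host-per-line expansion; a trivial Python equivalent, with 'hosts' standing in for the include_hosts_list binding the agent passed to the template:

# Minimal sketch of what include_hosts_list.erb rendered: one host per line.
def render_host_list(hosts):
    return '\n'.join(str(h) for h in hosts)

print(render_host_list(['host1.example.com', 'host2.example.com']))
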

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
deleted file mode 100644
index 1458f1e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/log4j.properties.erb
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and number of rolled backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-<% if (scope.function_hdp_template_var("::hdp::params::is_jtnode_master") || scope.function_hdp_template_var("::hdp::params::is_rmnode_master"))%>
-#
-# Job Summary Appender 
-#
-# Use the following logger to send the job summary to a separate file,
-# defined by hadoop.mapreduce.jobsummary.log.file and rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2)%>
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# <LEVEL>,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File=<%=scope.function_hdp_template_var("yarn_log_dir_prefix")%>/<%=scope.function_hdp_template_var("yarn_user")%>/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-<% else %>
-log4j.appender.JSA.File=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>/${hadoop.mapreduce.jobsummary.log.file}
-<%end-%>
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-<% if (scope.function_hdp_get_major_stack_version([scope.function_hdp_template_var("stack_version")]) >= 2)%>
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-<% else %>
-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
-<%end-%>
-<%end-%>
-
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.database=<%=scope.function_hdp_host("ambari_db_rca_url")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.driver=<%=scope.function_hdp_host("ambari_db_rca_driver")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.user=<%=scope.function_hdp_host("ambari_db_rca_username")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.password=<%=scope.function_hdp_host("ambari_db_rca_password")%>
-<%=scope.function_hdp_template_var("rca_prefix")%>ambari.jobhistory.logger=DEBUG,JHA
-
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.database=${ambari.jobhistory.database}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.driver=${ambari.jobhistory.driver}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.user=${ambari.jobhistory.user}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.appender.JHA.password=${ambari.jobhistory.password}
-
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
-<%=scope.function_hdp_template_var("rca_prefix")%>log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
deleted file mode 100644
index 3cd38b3..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/slaves.erb
+++ /dev/null
@@ -1,3 +0,0 @@
-<%h=scope.function_hdp_host("slave_hosts"); (h.kind_of?(Array) ? h : []).each do |host|-%>
-<%= host %>
-<%end-%>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb b/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
deleted file mode 100644
index 78fd75e..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/taskcontroller.cfg.erb
+++ /dev/null
@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir=<%=scope.function_hdp_template_var("mapred_local_dir")%>
-mapreduce.tasktracker.group=<%=scope.function_hdp_template_var("mapred_tt_group")%>
-hadoop.log.dir=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/<%=scope.function_hdp_template_var("mapred_user")%>

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh b/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
deleted file mode 100644
index 0003188..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmoke.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','value1'
-scan 'ambarismoketest'
-exit

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh b/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh
deleted file mode 100644
index 39fe6e5..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file
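
Taken together, hbaseSmoke.sh writes a known value into the ambarismoketest table and hbaseSmokeVerify.sh confirms that value comes back from a scan with exactly one row. A hypothetical Python sketch of the verify step (not the agent's actual replacement code):

# Sketch of hbaseSmokeVerify.sh: scan the smoke table through the hbase
# shell, then require both the expected data and a single-row result.
import subprocess
import sys

def verify(conf_dir, expected_data):
    scan = subprocess.Popen(
        ['hbase', '--config', conf_dir, 'shell'],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = scan.communicate(b"scan 'ambarismoketest'\n")
    text = out.decode('utf-8', 'replace')
    print(text)
    print('Looking for %s' % expected_data)
    if expected_data not in text:
        return 1
    return 0 if '1 row(s)' in text else 1

if __name__ == '__main__':
    sys.exit(verify(sys.argv[1], sys.argv[2]))
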

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
deleted file mode 100644
index b0931df..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/client.pp
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::client(
-  $service_state = $hdp::params::cluster_client_state,
-  $opts = {}
-)
-{
-  include hdp-hbase::params
-  $hbase_tmp_dir = $hdp-hbase::params::hbase_tmp_dir
-
-  #assumption is there are no other hbase components on node
-  if ($service_state == 'no_op') {
-  } elsif ($service_state in ['installed_and_configured','uninstalled']) {
-    if (($hdp::params::service_exists['hdp-hbase::master'] != true) and ($hdp::params::service_exists['hdp-hbase::regionserver'] != true)) {
-      #adds package, users, directories, and common configs
-      class { 'hdp-hbase': 
-        type          => 'client',
-        service_state => $service_state
-      }
-
-      hdp::directory_recursive_create_ignore_failure { "${hbase_tmp_dir}/local/jars":
-        owner => $hdp-hbase::params::hbase_user,
-        context_tag => 'hbase_client',
-        service_state => $service_state,
-        force => true
-      }
-
-      Class[ 'hdp-hbase' ] -> Hdp::Directory_recursive_create_ignore_failure<||>
-    }
-  } else {
-    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
-  }
-}
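
The class above is a service_state dispatcher: no_op does nothing, installed_and_configured/uninstalled configure the client only when no HBase master or regionserver owns the node, and anything else fails. A compact Python sketch of that dispatch, with hypothetical callables standing in for the Puppet resources it declared:

# Sketch of the hdp-hbase::client state dispatch above.
def apply_client_state(service_state, node_has_master, node_has_regionserver,
                       configure):
    if service_state == 'no_op':
        return
    if service_state in ('installed_and_configured', 'uninstalled'):
        # only configure the client when no other HBase role owns this node
        if not (node_has_master or node_has_regionserver):
            configure(service_state)
    else:
        raise ValueError('TODO not implemented yet: service_state = %s'
                         % service_state)
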

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
deleted file mode 100644
index 6bad593..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
+++ /dev/null
@@ -1,113 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase::hbase::service_check() inherits hdp-hbase::params
-{
-  $smoke_test_user = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
-  $conf_dir = $hdp::params::hbase_conf_dir
-  $smoke_user_keytab = $hdp::params::smokeuser_keytab
-  $hbase_user = $hdp-hbase::params::hbase_user
-  $hbase_keytab = $hdp::params::hbase_user_keytab
-  $serviceCheckData = hdp_unique_id_and_date()
-  $kinit_cmd = "${hdp::params::kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user};"
-
-  anchor { 'hdp-hbase::hbase::service_check::begin':}
-
-  if (hdp_get_major_stack_version($hdp::params::stack_version) >= 2){
-    $output_file = "${hbase_hdfs_root_dir}/data/default/ambarismoketest"
-  } else {
-    $output_file = "${hbase_hdfs_root_dir}/ambarismoketest"
-  }
-
-  $test_cmd = "fs -test -e ${output_file}"
-
-  $hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-
-  file { '/tmp/hbaseSmokeVerify.sh':
-    ensure => present,
-    source => "puppet:///modules/hdp-hbase/hbaseSmokeVerify.sh",
-    mode => '0755',
-  }
-
-  file { $hbase_servicecheck_file:
-    mode => '0755',
-    content => template('hdp-hbase/hbase-smoke.sh.erb'),
-  }
-  if ($security_enabled == true) {
-    $servicecheckcmd = "su - ${smoke_test_user} -c '$kinit_cmd hbase --config $conf_dir  shell $hbase_servicecheck_file'"
-    $smokeverifycmd = "su - ${smoke_test_user} -c '$kinit_cmd /tmp/hbaseSmokeVerify.sh $conf_dir ${serviceCheckData}'"
-  } else {
-    $servicecheckcmd = "su - ${smoke_test_user} -c 'hbase --config $conf_dir  shell $hbase_servicecheck_file'"
-    $smokeverifycmd = "su - ${smoke_test_user} -c '/tmp/hbaseSmokeVerify.sh $conf_dir ${serviceCheckData}'"
-  }
-
-  exec { $hbase_servicecheck_file:
-    command   => $servicecheckcmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-
-  exec { '/tmp/hbaseSmokeVerify.sh':
-    command   => $smokeverifycmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    notify    => Hdp-hadoop::Exec-hadoop['hbase::service_check::test'],
-    logoutput => "true"
-  }
-
-  hdp-hadoop::exec-hadoop { 'hbase::service_check::test':
-    command     => $test_cmd,
-    refreshonly => true,
-    require     => Exec['/tmp/hbaseSmokeVerify.sh'],
-    before      => Anchor['hdp-hbase::hbase::service_check::end'] #TODO: remove after testing
-  }
-
-  if ($security_enabled == true) {
-    $hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-    $hbase_kinit_cmd = "${hdp::params::kinit_path_local} -kt ${hbase_keytab} ${hbase_user};"
-    $grantprivelegecmd = "$hbase_kinit_cmd hbase shell ${hbase_grant_premissions_file}"
-
-    file { $hbase_grant_premissions_file:
-      owner   => $hbase_user,
-      group   => $hdp::params::user_group,
-      mode => '0644',
-      content => template('hdp-hbase/hbase_grant_permissions.erb')
-      }
-      hdp-hadoop::exec-hadoop { '${smokeuser}_grant_privileges' :
-        command => $grantprivelegecmd,
-        require => File[$hbase_grant_premissions_file],
-        user => $hbase_user
-      }
-    Anchor['hdp-hbase::hbase::service_check::begin'] -> File['/tmp/hbaseSmokeVerify.sh']
-    File[$hbase_servicecheck_file] -> File[$hbase_grant_premissions_file] ->
-      Hdp-hadoop::Exec-hadoop['${smokeuser}_grant_privileges'] ->
-      Exec[$hbase_servicecheck_file] ->
-      Exec['/tmp/hbaseSmokeVerify.sh'] -> Anchor['hdp-hbase::hbase::service_check::end']
-  } else {
-    Anchor['hdp-hbase::hbase::service_check::begin'] ->  File['/tmp/hbaseSmokeVerify.sh']
-    File[$hbase_servicecheck_file] -> Exec[$hbase_servicecheck_file] -> Exec['/tmp/hbaseSmokeVerify.sh']
-    -> Anchor['hdp-hbase::hbase::service_check::end']
-  }
-  anchor{ 'hdp-hbase::hbase::service_check::end':}
-}
\ No newline at end of file
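
The service check above orchestrates three steps: run the smoke shell script (prefixed with kinit on secure clusters), verify the scan output, then confirm the table path exists in HDFS. A hedged Python sketch of the first two steps, approximating the manifest's tries/try_sleep retry semantics with hypothetical helpers:

# Sketch of the HBase service-check flow above.
import subprocess
import time

def run_with_retries(cmd, tries=3, try_sleep=5):
    for _ in range(tries):
        if subprocess.call(cmd, shell=True) == 0:
            return True
        time.sleep(try_sleep)
    return False

def hbase_service_check(smoke_user, conf_dir, check_data,
                        security_enabled=False, kinit_cmd=''):
    prefix = kinit_cmd if security_enabled else ''
    smoke = "su - %s -c '%shbase --config %s shell /tmp/hbase-smoke.sh'" % (
        smoke_user, prefix, conf_dir)
    verify = "su - %s -c '%s/tmp/hbaseSmokeVerify.sh %s %s'" % (
        smoke_user, prefix, conf_dir, check_data)
    return run_with_retries(smoke) and run_with_retries(verify)
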

http://git-wip-us.apache.org/repos/asf/ambari/blob/3d1171b0/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
----------------------------------------------------------------------
diff --git a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp b/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
deleted file mode 100644
index 384fff5..0000000
--- a/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
+++ /dev/null
@@ -1,155 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-class hdp-hbase(
-  $type,
-  $service_state) 
-{
-  include hdp-hbase::params
- 
-  $hbase_user = $hdp-hbase::params::hbase_user
-  $config_dir = $hdp-hbase::params::conf_dir
-  
-  $hdp::params::component_exists['hdp-hbase'] = true
-  $smokeuser = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
-
-  #Configs generation  
-
-  if has_key($configuration, 'hbase-site') {
-    configgenerator::configfile{'hbase-site': 
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hbase-site.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hbase-site'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hbase-site.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hdfs-site') {
-    configgenerator::configfile{'hdfs-site':
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hdfs-site.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hdfs-site'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hdfs-site.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  if has_key($configuration, 'hbase-policy') {
-    configgenerator::configfile{'hbase-policy': 
-      modulespath => $hdp-hbase::params::conf_dir,
-      filename => 'hbase-policy.xml',
-      module => 'hdp-hbase',
-      configuration => $configuration['hbase-policy'],
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  } else { # Manually overriding ownership of file installed by hadoop package
-    file { "${hdp-hbase::params::conf_dir}/hbase-policy.xml":
-      owner => $hbase_user,
-      group => $hdp::params::user_group
-    }
-  }
-
-  anchor{'hdp-hbase::begin':}
-  anchor{'hdp-hbase::end':}
-
-  if ($service_state == 'uninstalled') {
-    hdp::package { 'hbase':
-      ensure => 'uninstalled'
-    }
-    hdp::directory { $config_dir:
-      service_state => $service_state,
-      force => true
-    }
-
-    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] -> Anchor['hdp-hbase::end']
-
-  } else {  
-    hdp::package { 'hbase': }
-  
-    hdp::directory { $config_dir: 
-      service_state => $service_state,
-      force => true,
-      owner => $hbase_user,
-      group => $hdp::params::user_group,
-      override_owner => true
-    }
-
-    hdp-hbase::configfile { ['hbase-env.sh', $hdp-hbase::params::metric-prop-file-name]:
-      type => $type
-    }
-
-    hdp-hbase::configfile { 'regionservers':}
-
-    if ($security_enabled == true) {
-      if ($type == 'master' and $service_state == 'running') {
-        hdp-hbase::configfile { 'hbase_master_jaas.conf' : }
-      } elsif ($type == 'regionserver' and $service_state == 'running') {
-        hdp-hbase::configfile { 'hbase_regionserver_jaas.conf' : }
-      } elsif ($type == 'client') {
-        hdp-hbase::configfile { 'hbase_client_jaas.conf' : }
-      }
-    }
-    Anchor['hdp-hbase::begin'] -> Hdp::Package['hbase'] -> Hdp::Directory[$config_dir] ->
-    Hdp-hbase::Configfile<||> ->  Anchor['hdp-hbase::end']
-  }
-}
-
-### config files
-define hdp-hbase::configfile(
-  $mode = undef,
-  $hbase_master_hosts = undef,
-  $template_tag = undef,
-  $type = undef,
-  $conf_dir = $hdp-hbase::params::conf_dir
-) 
-{
-  if ($name == $hdp-hbase::params::metric-prop-file-name) {
-    if ($type == 'master') {
-      $tag = GANGLIA-MASTER
-    } else {
-      $tag = GANGLIA-RS
-    }
-  } else {
-    $tag = $template_tag
-  }
-
-  hdp::configfile { "${conf_dir}/${name}":
-    component         => 'hbase',
-    owner             => $hdp-hbase::params::hbase_user,
-    mode              => $mode,
-    hbase_master_hosts => $hbase_master_hosts,
-    template_tag      => $tag
-  }
-}
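
The configfile define above routes the Ganglia metrics properties file to a role-specific template tag and leaves every other file on the caller-supplied tag. A small Python sketch of that selection:

# Sketch of the template-tag selection in hdp-hbase::configfile above.
def template_tag(filename, node_type, metric_prop_file, default_tag=None):
    if filename == metric_prop_file:
        return 'GANGLIA-MASTER' if node_type == 'master' else 'GANGLIA-RS'
    return default_tag

assert template_tag('hadoop-metrics.properties', 'master',
                    'hadoop-metrics.properties') == 'GANGLIA-MASTER'
assert template_tag('regionservers', 'regionserver',
                    'hadoop-metrics.properties') is None
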