Posted to commits@ambari.apache.org by ma...@apache.org on 2012/10/15 07:55:57 UTC

svn commit: r1398196 [4/11] - in /incubator/ambari/branches/branch-0.9-h2-dev: ./ hmc/js/ hmc/package/rpm/ hmc/php/conf/ hmc/php/db/ hmc/php/frontend/ hmc/php/orchestrator/ hmc/php/puppet/ hmc/php/puppet/genmanifest/ hmc/php/util/ hmc/puppet/modules/hd...

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/copyfromlocal.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/copyfromlocal.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/copyfromlocal.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/copyfromlocal.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,74 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2-hadoop::hdfs::copyfromlocal(
+  $service_state,
+  $owner = unset,
+  $group = unset,
+  $recursive_chown = false,
+  $mode = undef,
+  $recursive_chmod = false,
+  $dest_dir = undef 
+) 
+{
+ 
+  if ($service_state == 'running') {
+    $copy_cmd = "fs -copyFromLocal ${name} ${dest_dir}"
+    hdp2-hadoop::exec-hadoop { $copy_cmd:
+      command => $copy_cmd,
+      unless => "hadoop fs -ls ${dest_dir} >/dev/null 2>&1"
+    }
+    if ($owner == unset) {
+      $chown = ""
+    } else {
+      if ($group == unset) {
+        $chown = $owner
+      } else {
+        $chown = "${owner}:${group}"
+      }
+    }
+ 
+    if ($chown != "") {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chown == true) {
+        $chown_cmd = "fs -chown -R ${chown} ${dest_dir}"
+      } else {
+        $chown_cmd = "fs -chown ${chown} ${dest_dir}"
+      }
+      hdp2-hadoop::exec-hadoop {$chown_cmd :
+        command => $chown_cmd
+      }
+      Hdp2-hadoop::Exec-hadoop[$copy_cmd] -> Hdp2-hadoop::Exec-hadoop[$chown_cmd]
+    }
+  
+    if ($mode != undef) {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chmod == true) {
+        $chmod_cmd = "fs -chmod -R ${mode} ${dest_dir}"
+      } else {
+        $chmod_cmd = "fs -chmod ${mode} ${dest_dir}"
+      }
+      hdp2-hadoop::exec-hadoop {$chmod_cmd :
+        command => $chmod_cmd
+      }
+      Hdp2-hadoop::Exec-hadoop[$copy_cmd] -> Hdp2-hadoop::Exec-hadoop[$chmod_cmd]
+    }
+  }       
+}
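
A minimal sketch of how this define might be invoked (the source path, owner,
and destination below are illustrative, not taken from this commit):

    hdp2-hadoop::hdfs::copyfromlocal { '/etc/passwd':
      service_state => 'running',
      owner         => 'hdfs',
      mode          => '644',
      dest_dir      => '/tmp/passwd-copy'
    }

The resource title is the local source path. With these arguments the define
runs "fs -copyFromLocal /etc/passwd /tmp/passwd-copy", then a chown and a chmod
of the destination, each wrapped through hdp2-hadoop::exec-hadoop and ordered
after the copy.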

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/directory.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/directory.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/directory.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/directory.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,74 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: unset should be changed to undef, just to be consistent
+define hdp2-hadoop::hdfs::directory(
+  $service_state = 'running',
+  $owner = unset,
+  $group = unset,
+  $recursive_chown = false,
+  $mode = undef,
+  $recursive_chmod = false
+) 
+{
+ 
+  if ($service_state == 'running') {
+    $mkdir_cmd = "fs -mkdir -p ${name}"
+    hdp2-hadoop::exec-hadoop { $mkdir_cmd:
+      command => $mkdir_cmd,
+      unless => "hadoop fs -ls ${name} >/dev/null 2>&1"
+    }
+    if ($owner == unset) {
+      $chown = ""
+    } else {
+      if ($group == unset) {
+        $chown = $owner
+      } else {
+        $chown = "${owner}:${group}"
+      }
+    }
+ 
+    if ($chown != "") {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chown == true) {
+        $chown_cmd = "fs -chown -R ${chown} ${name}"
+      } else {
+        $chown_cmd = "fs -chown ${chown} ${name}"
+      }
+      hdp2-hadoop::exec-hadoop {$chown_cmd :
+        command => $chown_cmd
+      }
+      Hdp2-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp2-hadoop::Exec-hadoop[$chown_cmd]
+    }
+  
+    if ($mode != undef) {
+      #TODO: see if there is a good 'unless test'
+      if ($recursive_chmod == true) {
+        $chmod_cmd = "fs -chmod -R ${mode} ${name}"
+      } else {
+        $chmod_cmd = "fs -chmod ${mode} ${name}"
+      }
+      hdp2-hadoop::exec-hadoop {$chmod_cmd :
+        command => $chmod_cmd
+      }
+      Hdp2-hadoop::Exec-hadoop[$mkdir_cmd] -> Hdp2-hadoop::Exec-hadoop[$chmod_cmd]
+    }
+  }       
+}
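
A minimal usage sketch (path, owner, and mode are illustrative):

    hdp2-hadoop::hdfs::directory { '/apps/hbase':
      service_state => 'running',
      owner         => 'hbase',
      mode          => '711'
    }

    # roughly equivalent to running, as the hdfs user:
    #   hadoop --config <conf_dir> fs -mkdir -p /apps/hbase
    #   hadoop --config <conf_dir> fs -chown hbase /apps/hbase
    #   hadoop --config <conf_dir> fs -chmod 711 /apps/hbase

The hdp2-hadoop::hdfs-directories class in service_check.pp below uses this
same define to create the smoke-test user's home directory.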

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/service_check.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/service_check.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/service_check.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/hdfs/service_check.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,94 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::hdfs::service_check()
+{
+  $unique = hdp_unique_id_and_date()
+  $dir = '/tmp'
+  $tmp_file = "${dir}/${unique}"
+  $user_dir='/user'
+  $apps_dir='/apps'
+
+  $safemode_command = "dfsadmin -safemode get | grep OFF"
+
+  $create_dir_cmd = "fs -mkdir ${dir} ; hadoop fs -chmod -R 777 ${dir} ; hadoop fs -mkdir ${user_dir} ; hadoop fs -chmod 755 ${user_dir} ; hadoop fs -mkdir ${apps_dir} ; hadoop fs -chmod 755 ${apps_dir} ;"
+  $test_dir_exists = "hadoop fs -test -e ${dir}" #TODO: may fix up the fact that the test needs an explicit hadoop prefix while the command does not
+  $cleanup_cmd = "fs -rm ${tmp_file}"
+  #cleanup is folded in below to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code is a function of the second command
+  $create_file_cmd = "${cleanup_cmd}; hadoop fs -put /etc/passwd ${tmp_file}" #TODO: inconsistent that the second command needs the hadoop prefix
+  $test_cmd = "fs -test -e ${tmp_file}"
+
+  anchor { 'hdp2-hadoop::hdfs::service_check::begin':}
+
+  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::check_safemode':
+    command   => $safemode_command,
+    tries     => 40,
+    try_sleep => 15,
+    logoutput => true,
+    require   => Anchor['hdp2-hadoop::hdfs::service_check::begin']
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::create_dir':
+    command   => $create_dir_cmd,
+    unless    => $test_dir_exists,
+    tries     => 3,
+    try_sleep => 5,
+    require   => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::check_safemode']
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 3,
+    try_sleep => 5,
+    require   => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::create_dir'],
+    notify    => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::test']
+  }
+
+  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::create_file'],
+    #notify      => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::cleanup']  #TODO: put in after testing
+    before      => Anchor['hdp2-hadoop::hdfs::service_check::end'] #TODO: remove after testing
+  }
+
+   #TODO: put in after testing
+ #  hdp2-hadoop::exec-hadoop { 'hdfs::service_check::cleanup':
+ #   command     => $cleanup_cmd,
+ #   refreshonly => true,
+ #   require     => Hdp2-hadoop::Exec-hadoop['hdfs::service_check::test'],
+ #   before      => Anchor['hdp2-hadoop::hdfs::service_check::end']
+  #}
+  anchor{ 'hdp2-hadoop::hdfs::service_check::end':}
+
+  class { 'hdp2-hadoop::hdfs-directories' :
+    service_state => 'running'
+  }
+}
+
+class hdp2-hadoop::hdfs-directories($service_state)
+{
+  $smoke_test_user = $hdp2::params::smokeuser
+  hdp2-hadoop::hdfs::directory{ "/user/${smoke_test_user}":
+    service_state => $service_state,
+    owner => $smoke_test_user,
+    mode  => '770',
+    recursive_chmod => true
+  }
+}
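
Taken together, the check reduces to four hadoop invocations run in order: poll
"dfsadmin -safemode get | grep OFF" until the namenode leaves safemode (up to
40 tries, 15 seconds apart), create and chmod /tmp, /user, and /apps unless
/tmp already exists, put /etc/passwd to a uniquely named file under /tmp
(removing any stale copy from a prior attempt first), and finally "fs -test -e"
that file. The anchors only pin the ordering; on a node the whole chain is
pulled in with a plain include:

    include hdp2-hadoop::hdfs::service_check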

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/init.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/init.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/init.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/init.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,194 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton for use with <||> form so that namenode, datanode, etc can pass state to hdp2-hadoop and still use include
+define hdp2-hadoop::common(
+  $service_states = []
+)
+{
+  class { 'hdp2-hadoop':
+    service_states => $service_states    
+  }
+  anchor{'hdp2-hadoop::common::begin':} -> Class['hdp2-hadoop'] -> anchor{'hdp2-hadoop::common::end':} 
+}
+
+class hdp2-hadoop::initialize()
+{
+  if ($hdp2::params::component_exists['hdp2-hadoop'] != true) {
+    $hdp2::params::component_exists['hdp2-hadoop'] = true
+  }
+  hdp2-hadoop::common { 'common':}
+  anchor{'hdp2-hadoop::initialize::begin':} -> Hdp2-hadoop::Common['common'] -> anchor{'hdp2-hadoop::initialize::end':}
+}
+
+class hdp2-hadoop(
+  $service_states  = []
+)
+{
+  include hdp2-hadoop::params
+  $hadoop_config_dir = $hdp2-hadoop::params::conf_dir
+  $hdfs_user = $hdp2-hadoop::params::hdfs_user  
+  $yarn_user = $hdp2-hadoop::params::yarn_user  
+  $mapred_user = $hdp2-hadoop::params::mapred_user  
+
+  anchor{'hdp2-hadoop::begin':} 
+  anchor{'hdp2-hadoop::end':} 
+
+  if ('uninstalled' in $service_states) {
+    hdp2-hadoop::package { 'hadoop':
+      ensure => 'uninstalled'
+    }
+
+    hdp2::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+
+    Anchor['hdp2-hadoop::begin'] -> Hdp2-hadoop::Package<||> -> Hdp2::Directory_recursive_create[$hadoop_config_dir] -> Anchor['hdp2-hadoop::end']
+  } else {
+    
+    hdp2-hadoop::package { 'hadoop':}
+
+
+    hdp2::directory_recursive_create { $hadoop_config_dir:
+      service_state => $service_state,
+      force => true
+    }
+ 
+    hdp2::user { $hdfs_user:}
+    hdp2::user { $yarn_user:}
+    hdp2::user { $mapred_user:}
+
+    $logdirprefix = $hdp2-hadoop::params::hadoop_logdirprefix
+    hdp2::directory_recursive_create { $logdirprefix: 
+        owner => 'root'
+    }
+    $piddirprefix = $hdp2-hadoop::params::hadoop_piddirprefix
+    hdp2::directory_recursive_create { $piddirprefix: 
+        owner => 'root'
+    }
+ 
+    $common_hdfs_template_files = ['hadoop-env.sh','core-site.xml','hdfs-site.xml','hadoop-policy.xml','health_check','hadoop-metrics2.properties','commons-logging.properties','log4j.properties','slaves']
+    hdp2-hadoop::configfile { $common_hdfs_template_files:
+      tag   => 'common', 
+      owner => $hdfs_user
+    }
+    
+    $yarn_template_files = ['yarn-site.xml','yarn-env.sh','container-executor.cfg','capacity-scheduler.xml']
+    hdp2-hadoop::configfile { $yarn_template_files: 
+      tag => 'common', 
+      owner => $yarn_user
+    }
+
+    hdp2-hadoop::configfile { 'mapred-site.xml': 
+      tag => 'common', 
+      owner => $mapred_user
+    }
+
+    Anchor['hdp2-hadoop::begin'] -> Hdp2-hadoop::Package<||> ->  Hdp2::Directory_recursive_create[$hadoop_config_dir] ->  Hdp2::User<|title == $hdfs_user or title == $yarn_user or title == $mapred_user|> 
+    -> Hdp2-hadoop::Configfile<|tag == 'common'|> -> Anchor['hdp2-hadoop::end']
+    Anchor['hdp2-hadoop::begin'] -> Hdp2::Directory_recursive_create[$logdirprefix] -> Anchor['hdp2-hadoop::end']
+    Anchor['hdp2-hadoop::begin'] -> Hdp2::Directory_recursive_create[$piddirprefix] -> Anchor['hdp2-hadoop::end']
+  }
+}
+
+class hdp2-hadoop::enable-ganglia()
+{
+  Hdp2-hadoop::Configfile<|title  == 'hadoop-metrics2.properties'|>{template_tag => 'GANGLIA'}
+}
+
+###config file helper
+define hdp2-hadoop::configfile(
+  $owner = undef,
+  $hadoop_conf_dir = $hdp2-hadoop::params::conf_dir,
+  $mode = undef,
+  $namenode_host = undef,
+  $yarn_rm_host = undef,
+  $snamenode_host = undef,
+  $template_tag = undef,
+  $size = undef, #TODO: deprecate
+  $sizes = []
+) 
+{
+  #TODO: may need to be fixed 
+  if ($yarn_rm_host == undef) {
+    $calc_yarn_rm_host = $namenode_host
+  } else {
+    $calc_yarn_rm_host = $yarn_rm_host 
+  }
+ 
+  #only set 32 if there is a 32-bit component and no 64-bit components
+  if (64 in $sizes) {
+    $common_size = 64
+  } elsif (32 in $sizes) {
+    $common_size = 32
+  } else {
+    $common_size = 64
+  }
+  
+  hdp2::configfile { "${hadoop_conf_dir}/${name}":
+    component      => 'hadoop',
+    owner          => $owner,
+    mode           => $mode,
+    namenode_host  => $namenode_host,
+    snamenode_host => $snamenode_host,
+    yarn_rm_host   => $calc_yarn_rm_host,
+    template_tag   => $template_tag,
+    size           => $common_size
+  }
+}
+
+#####
+define hdp2-hadoop::exec-hadoop(
+  $command,
+  $unless = undef,
+  $refreshonly = undef,
+  $echo_yes = false,
+  $tries = 1,
+  $timeout = 900,
+  $try_sleep = undef,
+  $user = undef,
+  $logoutput = undef
+)
+{
+  include hdp2-hadoop::params
+  $conf_dir = $hdp2-hadoop::params::conf_dir
+  if ($echo_yes == true) {
+    $cmd = "yes Y | hadoop --config ${conf_dir} ${command}"
+  } else {
+    $cmd = "hadoop --config ${conf_dir} ${command}"     
+  }
+  if ($user == undef) {
+   $run_user = $hdp2-hadoop::params::hdfs_user
+  } else {
+    $run_user = $user
+  }
+  hdp2::exec { $cmd:
+    command     => $cmd,
+    user        => $run_user,
+    unless      => $unless,
+    refreshonly => $refreshonly,
+    tries       => $tries,
+    timeout     => $timeout,
+    try_sleep   => $try_sleep,
+    logoutput   => $logoutput
+  }
+}
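
To make the wrapper concrete, a call such as the following (title and command
are illustrative):

    hdp2-hadoop::exec-hadoop { 'fs -ls /':
      command => 'fs -ls /'
    }

ends up executing "hadoop --config <conf_dir> fs -ls /" through hdp2::exec,
run as the hdfs user since no explicit $user is given; passing echo_yes => true
would prepend "yes Y |" for commands that prompt for confirmation.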

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/mapred/service_check.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/mapred/service_check.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/mapred/service_check.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/mapred/service_check.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,75 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::mapred::service_check() 
+{
+  $smoke_test_user = $hdp2::params::smokeuser
+  $mapred_examples_jar = $hdp2::params::mapred_examples_jar
+  $input_file = 'mapredsmokeinput'
+  $output_file = 'mapredsmokeoutput'
+
+  $cleanup_cmd = "dfs -rmr ${output_file} ${input_file}"
+  #cleanup is folded in below to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code is a function of the second command
+  $create_file_cmd = "${cleanup_cmd} ; hadoop dfs -put /etc/passwd ${input_file} " #TODO: inconsistent that the second command needs the hadoop prefix
+  $test_cmd = "fs -test -e ${output_file}" 
+  $run_wordcount_job = "jar ${mapred_examples_jar}  wordcount ${input_file} ${output_file}"
+  
+  anchor { 'hdp2-hadoop::mapred::service_check::begin':}
+
+  hdp2-hadoop::exec-hadoop { 'mapred::service_check::create_file':
+    command   => $create_file_cmd,
+    tries     => 1,
+    try_sleep => 5,
+    require   => Anchor['hdp2-hadoop::mapred::service_check::begin'],
+  #  notify    => Hdp2-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
+    user      => $smoke_test_user
+  }
+
+  hdp2-hadoop::exec-hadoop { 'mapred::service_check::run_wordcount':
+    command   => $run_wordcount_job,
+    tries     => 1,
+    try_sleep => 5,
+    require   => Hdp2-hadoop::Exec-hadoop['mapred::service_check::create_file'],
+    notify    => Hdp2-hadoop::Exec-hadoop['mapred::service_check::test'],
+    user      => $smoke_test_user,
+    logoutput => "true"
+  }
+
+#  exec { 'runjob':
+#    command   => "hadoop jar ${jar_location}/hadoop-examples.jar  wordcount ${input_file} ${output_file}",
+#    tries     => 1,
+#    try_sleep => 5,
+#    require   => Hdp2-hadoop::Exec-hadoop['mapred::service_check::create_file'],
+#    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+#    notify    => Hdp2-hadoop::Exec-hadoop['mapred::service_check::test'],
+#    logoutput => "true",
+#    user      => $smoke_test_user
+#  }
+
+  hdp2-hadoop::exec-hadoop { 'mapred::service_check::test':
+    command     => $test_cmd,
+    refreshonly => true,
+    require     => Hdp2-hadoop::Exec-hadoop['mapred::service_check::run_wordcount'],
+    before      => Anchor['hdp2-hadoop::mapred::service_check::end'], #TODO: remove after testing
+    user        => $smoke_test_user
+  }
+  
+  anchor{ 'hdp2-hadoop::mapred::service_check::end':}
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,78 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::namenode(
+  $service_state = $hdp2::params::cluster_service_state,
+  $slave_hosts = [],
+  $format = true,
+  $opts = {}
+) inherits hdp2-hadoop::params
+{
+  $hdp2::params::service_exists['hdp2-hadoop::namenode'] = true
+
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+  Hdp2-hadoop::Package<||>{include_64_bit => true}
+  Hdp2-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $dfs_name_dir = $hdp2-hadoop::params::dfs_name_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+ 
+    hdp2-hadoop::namenode::create_name_dirs { $dfs_name_dir: 
+      service_state => $service_state
+    }
+   
+    Hdp2-Hadoop::Configfile<||>{namenode_host => $hdp2::params::host_address}
+    Hdp2::Configfile<||>{namenode_host => $hdp2::params::host_address} #for components other than hadoop (e.g., hbase) 
+  
+    if ($service_state == 'running' and $format == true) {
+      class {'hdp2-hadoop::namenode::format' : }
+    }
+
+    hdp2-hadoop::service{ 'namenode':
+      ensure       => $service_state,
+      user         => $hdp2-hadoop::params::hdfs_user,
+      initial_wait => hdp_option_value($opts,'wait')
+    }
+    #top level does not need anchors
+    Class['hdp2-hadoop'] ->  Hdp2-hadoop::Service['namenode']
+    Hdp2-hadoop::Namenode::Create_name_dirs<||> -> Hdp2-hadoop::Service['namenode']
+    if ($service_state == 'running' and $format == true) {
+      Class['hdp2-hadoop'] -> Class['hdp2-hadoop::namenode::format'] -> Hdp2-hadoop::Service['namenode']
+      Hdp2-hadoop::Namenode::Create_name_dirs<||> -> Class['hdp2-hadoop::namenode::format']
+    } 
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::namenode::create_name_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp2::directory_recursive_create { $dirs :
+    owner => $hdp2-hadoop::params::hdfs_user,
+    mode => '0755',
+    service_state => $service_state,
+    force => true
+  }
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/format.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/format.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/format.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/format.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,56 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::namenode::format(
+  $force = false
+)
+{
+  $mark_dir = $hdp2-hadoop::params::namenode_formatted_mark_dir
+  $dfs_name_dir = $hdp2-hadoop::params::dfs_name_dir
+  $hdfs_user = $hdp2::params::hdfs_user
+  $hadoop_conf_dir = $hdp2-hadoop::params::conf_dir
+
+  if ($force == true) {
+    hdp2-hadoop::exec-hadoop { 'namenode -format' :
+      command => 'namenode -format',
+      notify  => Hdp2::Exec['set namenode mark']
+    }
+  } else {
+    file { '/tmp/checkForFormat.sh':
+      ensure => present,
+      source => "puppet:///modules/hdp2-hadoop/checkForFormat.sh",
+      mode => '0755'
+    }
+
+    exec { '/tmp/checkForFormat.sh':
+      command   => "sh /tmp/checkForFormat.sh ${hdfs_user} ${hadoop_conf_dir} ${mark_dir} ${dfs_name_dir} ",
+      unless    => "test -d ${mark_dir}",
+      require   => File['/tmp/checkForFormat.sh'],
+      path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      logoutput => "true",
+      notify    => Hdp2::Exec['set namenode mark']
+    }
+  }
+
+  hdp2::exec { 'set namenode mark' :
+    command     => "mkdir -p ${mark_dir}",
+    refreshonly => true
+  }
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/service_check.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/service_check.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/service_check.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/namenode/service_check.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,28 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::namenode::service_check()
+{
+  hdp2-hadoop::exec-hadoop { 'namenode::service_check':
+    command   => 'dfs -ls /',
+    tries     => 3,
+    try_sleep => 5
+  }
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/nodemanager.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/nodemanager.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/nodemanager.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/nodemanager.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,63 @@
+class hdp2-hadoop::nodemanager(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hadoop::params
+{
+  $hdp2::params::service_exists['hdp2-hadoop::nodemanager'] = true
+
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+
+  if ($hdp2::params::use_32_bits_on_slaves == true) {
+    Hdp2-hadoop::Package<||>{include_32_bit => true}
+    Hdp2-hadoop::Configfile<||>{sizes +> 32}
+  } else {
+    Hdp2-hadoop::Package<||>{include_64_bit => true}
+    Hdp2-hadoop::Configfile<||>{sizes +> 64}
+  }
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) { 
+    $yarn_nm_local_dirs = $hdp2-hadoop::params::yarn_nm_local_dirs
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+  
+    hdp2-hadoop::nodemanager::create_local_dirs { $yarn_nm_local_dirs: 
+      service_state => $service_state
+    }
+    
+    if ($hdp2::params::service_exists['hdp2-hadoop::resourcemanager'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+
+    hdp2-hadoop::service{ 'nodemanager':
+      ensure => $service_state,
+      user   => $hdp2-hadoop::params::yarn_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp2-hadoop'] -> Hdp2-hadoop::Service['nodemanager']
+    Hdp2-hadoop::Nodemanager::Create_local_dirs<||> -> Hdp2-hadoop::Service['nodemanager']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::nodemanager::create_local_dirs($service_state)
+{
+  if ($hdp2::params::service_exists['hdp2-hadoop::resourcemanager'] != true) {
+    $dirs = hdp_array_from_comma_list($name)
+    hdp2::directory_recursive_create { $dirs :
+      owner => $hdp2-hadoop::params::yarn_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+  }
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/package.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/package.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/package.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/package.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#singleton, but implemented as a define so that collections can be used to override params
+define hdp2-hadoop::package(
+  $ensure = 'present',
+  $include_32_bit = false,
+  $include_64_bit = false
+)
+{
+  #just use 32 if it is specifically requested and there are no 64-bit requests
+  if ($include_32_bit == true) and ($include_64_bit != true) {
+    $size = 32
+  } else  {
+    $size = 64
+  }
+  $package = "hadoop ${size}"
+  $mapreduce_lzo_enabled = $hdp2::params::mapreduce_lzo_enabled
+
+  hdp2::package{ $package:
+    ensure       => $ensure,
+    package_type => 'hadoop',
+    size         => $size,
+    lzo_needed   => $mapreduce_lzo_enabled
+  }
+  anchor{ 'hdp2-hadoop::package::helper::begin': } -> Hdp2::Package[$package] -> anchor{ 'hdp2-hadoop::package::helper::end': }
+}
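
The define is declared once (as 'hadoop') and then specialized from outside via
collector overrides, which is how the master and slave classes elsewhere in
this commit select the package word size, e.g.:

    Hdp2-hadoop::Package<||>{include_64_bit => true}

With neither flag set, the size logic above still defaults to the 64-bit
package.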

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/params.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/params.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/params.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/params.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,178 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::params(
+) inherits hdp2::params 
+{
+
+  ##TODO: for testing in masterless mode
+  $use_preconditions = false
+  ####  
+  $conf_dir = $hdp2::params::hadoop_conf_dir 
+
+  ####### users
+
+  $hdfs_user = $hdp2::params::hdfs_user
+  $yarn_user = $hdp2::params::yarn_user
+  $mapred_user = $hdp2::params::mapred_user
+  
+  ### hadoop-env
+  
+  $dtnode_heapsize = hdp_default("hadoop/hadoop-env/dtnode_heapsize","1024m")
+
+  $hadoop_heapsize = hdp_default("hadoop/hadoop-env/hadoop_heapsize","1024m")
+
+  $hadoop_logdirprefix = hdp_default("hadoop/hadoop-env/hadoop_logdirprefix","/var/log/hadoop")
+
+  $hadoop_piddirprefix = hdp_default("hadoop/hadoop-env/hadoop_piddirprefix","/var/run/hadoop")
+  $run_dir = $hadoop_piddirprefix
+  
+  $namenode_formatted_mark_dir = "${run_dir}/hdfs/namenode/formatted/"
+  
+  $yarn_heapsize = hdp_default("hadoop/yarn-env/yarn_heapsize","1024")
+
+  $rm_heapsize = hdp_default("hadoop/yarn-env/rm_heapsize","1024")
+
+  $rm_opt_maxnewsize = hdp_default("hadoop/yarn-env/rm_opt_maxnewsize","200m")
+
+  $rm_opt_newsize = hdp_default("hadoop/yarn-env/rm_opt_newsize","200m")
+
+  $namenode_heapsize = hdp_default("hadoop/hadoop-env/namenode_heapsize","1024m")
+
+  $namenode_opt_maxnewsize = hdp_default("hadoop/hadoop-env/namenode_opt_maxnewsize","640m")
+
+  $namenode_opt_newsize = hdp_default("hadoop/hadoop-env/namenode_opt_newsize","640m")
+  
+  ### compression related
+  if (($hdp2::params::mapreduce_lzo_enabled == true) and ($hdp2::params::mapreduce_snappy_enabled == true)) {
+    $mapred_compress_map_output = true
+    $compression_codecs =  "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec"
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
+  } elsif ($hdp2::params::mapreduce_snappy_enabled == true) {
+    $mapred_compress_map_output = true
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec" 
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.SnappyCodec"
+  } elsif ($hdp2::params::mapreduce_lzo_enabled == true) {
+    $mapred_compress_map_output = true
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec"
+    $mapred_map_output_compression_codec = "com.hadoop.compression.lzo.LzoCodec"
+  } else { 
+    $mapred_compress_map_output = false
+    $compression_codecs = "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec"
+    $mapred_map_output_compression_codec = "org.apache.hadoop.io.compress.DefaultCodec"
+  }
+
+  ### core-site
+  $enable_security_authorization = hdp_default("hadoop/core-site/enable_security_authorization","false")
+
+  $fs_checkpoint_dir = hdp_default("hadoop/core-site/fs_checkpoint_dir","/tmp/dfs/namesecondary")
+
+  $proxyuser_group = hdp_default("hadoop/core-site/proxyuser_group","users")
+
+  $security_type = hdp_default("hadoop/core-site/security_type","simple")
+  
+  ### hdfs-site
+  $datanode_du_reserved = hdp_default("hadoop/hdfs-site/datanode_du_reserved",1073741824)
+
+  $dfs_block_local_path_access_user = hdp_default("hadoop/hdfs-site/dfs_block_local_path_access_user","hbase")
+
+  $dfs_data_dir = $hdp2::params::dfs_data_dir
+
+  $dfs_datanode_address = hdp_default("hadoop/hdfs-site/dfs_datanode_address",50010)
+
+  $dfs_datanode_data_dir_perm = hdp_default("hadoop/hdfs-site/dfs_datanode_data_dir_perm",750)
+
+  $dfs_datanode_failed_volume_tolerated = hdp_default("hadoop/hdfs-site/dfs_datanode_failed_volume_tolerated",0)
+
+  $dfs_datanode_http_address = hdp_default("hadoop/hdfs-site/dfs_datanode_http_address",50075)
+
+  $dfs_exclude = hdp_default("hadoop/hdfs-site/dfs_exclude","dfs.exclude")
+
+  $dfs_include = hdp_default("hadoop/hdfs-site/dfs_include","dfs.include")
+  
+  $dfs_name_dir = hdp_default("hadoop/hdfs-site/dfs_name_dir","/tmp/hadoop-hdfs/dfs/name")
+  
+  $dfs_replication = hdp_default("hadoop/hdfs-site/dfs_replication",3)
+
+  $dfs_support_append = hdp_default("hadoop/hdfs-site/dfs_support_append",true)
+
+  $dfs_webhdfs_enabled = hdp_default("hadoop/hdfs-site/dfs_webhdfs_enabled","false")
+
+
+ ######### yarn #########
+
+   ### yarn-site
+  $yarn_nm_local_dirs = hdp_default("hadoop/yarn-site/yarn_nm_local_dirs","/tmp/hadoop-yarn/nm/local")
+
+  $yarn_nm_log_dirs = hdp_default("hadoop/yarn-site/yarn_nm_log_dirs", "/tmp/hadoop-yarn/nm/logs")
+
+  $scheduler_name = hdp_default("hadoop/yarn-site/scheduler_name","org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler")
+
+  $container_executor = hdp_default("hadoop/yarn-site/container_executor","org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor")
+
+ ######### mapred #######
+   ### mapred-site
+
+  $mapred_system_dir = '/mapred/system'
+
+  $mapreduce_io_sort_mb = hdp_default("hadoop/mapred-site/mapreduce_io_sort_mb","200")
+
+  $io_sort_spill_percent = hdp_default("hadoop/mapred-site/io_sort_spill_percent","0.9")
+
+  $mapred_map_child_java_opts_sz = hdp_default("hadoop/mapred-site/mapred_map_child_java_opts_sz","-Xmx1024m")
+  
+  $mapred_reduce_child_java_opts_sz = hdp_default("hadoop/mapred-site/mapred_reduce_child_java_opts_sz","-Xmx2048m")
+
+  $mapred_cluster_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_map_mem_mb","-1")
+
+  $mapred_cluster_max_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_max_map_mem_mb","-1")
+
+  $mapred_cluster_max_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_max_red_mem_mb","-1")
+
+  $mapred_cluster_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_cluster_red_mem_mb","-1")
+
+  $mapred_hosts_exclude = hdp_default("hadoop/mapred-site/mapred_hosts_exclude","mapred.exclude")
+
+  $mapred_hosts_include = hdp_default("hadoop/mapred-site/mapred_hosts_include","mapred.include")
+
+  $mapred_job_map_mem_mb = hdp_default("hadoop/mapred-site/mapred_job_map_mem_mb","1024")
+
+  $mapred_job_red_mem_mb = hdp_default("hadoop/mapred-site/mapred_job_red_mem_mb","2048")
+
+  $mapred_jobstatus_dir = hdp_default("hadoop/mapred-site/mapred_jobstatus_dir","file:////mapred/jobstatus")
+
+  $mapred_local_dir = hdp_default("hadoop/mapred-site/mapred_local_dir","/tmp/hadoop-mapred/mapred/local")
+   
+  $mapred_map_tasks_max = hdp_default("hadoop/mapred-site/mapred_map_tasks_max",4)
+
+  $mapred_red_tasks_max = hdp_default("hadoop/mapred-site/mapred_red_tasks_max",4)
+
+  $mapreduce_userlog_retainhours = hdp_default("hadoop/mapred-site/mapreduce_userlog_retainhours",24)
+
+  $maxtasks_per_job = hdp_default("hadoop/mapred-site/maxtasks_per_job","-1")
+
+  $task_controller = hdp_default("hadoop/mapred-site/task_controller","org.apache.hadoop.mapred.DefaultTaskController")
+
+  #### health_check
+
+  $security_enabled = hdp_default("hadoop/health_check/security_enabled","false")
+
+  $task_bin_exe = hdp_default("hadoop/health_check/task_bin_exe")
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,100 @@
+class hdp2-hadoop::resourcemanager(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hadoop::params
+{
+  $hdp2::params::service_exists['hdp2-hadoop::resourcemanager'] = true
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+  Hdp2-hadoop::Package<||>{include_64_bit => true}
+  Hdp2-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $yarn_user = $hdp2-hadoop::params::yarn_user
+    $mapred_user = $hdp2-hadoop::params::mapred_user
+    $yarn_nm_local_dirs = $hdp2-hadoop::params::yarn_nm_local_dirs 
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+     
+    hdp2-hadoop::resourcemanager::create_local_dirs { $yarn_nm_local_dirs: 
+      service_state => $service_state
+    }
+
+    #TODO: cleanup 
+    Hdp2-Hadoop::Configfile<||>{yarn_rm_host => $hdp2::params::host_address}
+
+    class { 'hdp2-hadoop::resourcemanager::hdfs-directory' : 
+      service_state => $service_state 
+    }
+
+    #TODO: do we keep precondition here?
+    if ($service_state == 'running' and $hdp2-hadoop::params::use_preconditions == true) {
+      class { 'hdp2-hadoop::hdfs::service_check':
+        before => Hdp2-hadoop::Service['resourcemanager'],
+        require => Class['hdp2-hadoop']
+      }
+    }
+
+    hdp2-hadoop::service{ 'resourcemanager':
+      ensure       => $service_state,
+      user         => $yarn_user
+    }
+  
+    hdp2-hadoop::service{ 'historyserver':
+      ensure         => $service_state,
+      user           => $mapred_user,
+      create_pid_dir => true,
+      create_log_dir => true 
+    }
+
+    #top level does not need anchors
+    Class['hdp2-hadoop'] -> Hdp2-hadoop::Service['resourcemanager'] -> Hdp2-hadoop::Service['historyserver']
+    Class['hdp2-hadoop::resourcemanager::hdfs-directory'] -> Hdp2-hadoop::Service['resourcemanager']
+    Hdp2-hadoop::Resourcemanager::Create_local_dirs<||> -> Hdp2-hadoop::Service['resourcemanager']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::resourcemanager::create_local_dirs($service_state)
+{
+    $dirs = hdp_array_from_comma_list($name)
+    hdp2::directory_recursive_create { $dirs :
+      owner => $hdp2-hadoop::params::yarn_user,
+      mode => '0755',
+      service_state => $service_state,
+      force => true
+    }
+}
+
+class hdp2-hadoop::resourcemanager::hdfs-directory($service_state)
+{
+  hdp2-hadoop::hdfs::directory{ '/app-logs' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::yarn_user,
+    mode          => 1777
+  }  
+  hdp2-hadoop::hdfs::directory{ '/mapred' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::mapred_user,
+    mode          => 755
+  }  
+  hdp2-hadoop::hdfs::directory{ '/mapred/history' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::mapred_user,
+    mode          => 755
+  }  
+  hdp2-hadoop::hdfs::directory{ '/mapred/history/done_intermediate' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::mapred_user,
+    mode          => 1777
+  }  
+  hdp2-hadoop::hdfs::directory{ '/mapred/history/done' :
+    service_state => $service_state,
+    owner         => $hdp2-hadoop::params::mapred_user,
+    mode          => 770
+  }  
+  Hdp2-hadoop::Hdfs::Directory['/app-logs'] -> Hdp2-hadoop::Hdfs::Directory['/mapred'] -> Hdp2-hadoop::Hdfs::Directory['/mapred/history'] -> Hdp2-hadoop::Hdfs::Directory['/mapred/history/done'] -> Hdp2-hadoop::Hdfs::Directory['/mapred/history/done_intermediate']
+}
+
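
For reference, the HDFS layout this class produces, with owners and modes as
declared above:

    /app-logs                           $yarn_user    1777
    /mapred                             $mapred_user   755
    /mapred/history                     $mapred_user   755
    /mapred/history/done                $mapred_user   770
    /mapred/history/done_intermediate   $mapred_user  1777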

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager/service_check.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager/service_check.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager/service_check.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/resourcemanager/service_check.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,8 @@
+class hdp2-hadoop::resourcemanager::service_check()
+{
+  hdp2-hadoop::exec-hadoop { 'resourcemanager::service_check':
+    command   => 'job -list',
+    tries     => 3,
+    try_sleep => 5
+  }
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/service.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/service.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/service.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/service.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,107 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+define hdp2-hadoop::service(
+  $ensure = 'running',
+  $user,
+  $initial_wait = undef,
+  $create_pid_dir = true,
+  $create_log_dir = true
+)
+{
+
+  #NOTE: this does not work if the namenode and datanode are on the same host
+  $pid_dir = "${hdp2-hadoop::params::hadoop_piddirprefix}/${user}"
+  $log_dir = "${hdp2-hadoop::params::hadoop_logdirprefix}/${user}"
+  if (($name == 'resourcemanager') or ($name == 'nodemanager')) {
+    $hadoop_daemon = "${hdp2::params::yarn_sbin}/yarn-daemon.sh"
+    $cmd = "HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec ${hadoop_daemon} --config ${hdp2-hadoop::params::conf_dir}"
+    $pid_file = "${pid_dir}/yarn-${user}-${name}.pid"
+  } elsif ($name == 'historyserver') {
+    $hadoop_daemon = "${hdp2::params::mapred_sbin}/mr-jobhistory-daemon.sh"
+    # Temporary fix to not pass --config till Hadoop fixes it upstream.
+    $cmd = "HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec HADOOP_CONF_DIR=${hdp2-hadoop::params::conf_dir} ${hadoop_daemon}"
+    $pid_file = "${pid_dir}/mapred-${user}-${name}.pid"
+  } else {
+    $hadoop_daemon = "${hdp2::params::hadoop_sbin}/hadoop-daemon.sh"
+    $cmd = "${hadoop_daemon} --config ${hdp2-hadoop::params::conf_dir}"
+    $pid_file = "${pid_dir}/hadoop-${user}-${name}.pid"
+  }
+   
+  if ($ensure == 'running') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} start ${name}'"
+    $service_is_up = "ls ${pid_file} >/dev/null 2>&1 && ps `cat ${pid_file}` >/dev/null 2>&1"
+  } elsif ($ensure == 'stopped') {
+    $daemon_cmd = "su - ${user} -c  '${cmd} stop ${name}'"
+    $service_is_up = undef
+  } else {
+    $daemon_cmd = undef
+  }
+ 
+  if ($create_pid_dir == true) {
+    hdp2::directory_recursive_create { $pid_dir: 
+      owner       => $user,
+      context_tag => 'hadoop_service',
+      service_state => $service_state,
+      force => true
+    }
+  }
+  
+  if ($create_log_dir == true) {
+    hdp2::directory_recursive_create { $log_dir: 
+      owner       => $user,
+      context_tag => 'hadoop_service',
+      service_state => $service_state,
+      force => true
+    }
+  }
+  if ($daemon_cmd != undef) {  
+    hdp2::exec { $daemon_cmd:
+      command      => $daemon_cmd,
+      unless       => $service_is_up,
+      initial_wait => $initial_wait
+    }
+  }
+
+  anchor{"hdp2-hadoop::service::${name}::begin":}
+  anchor{"hdp2-hadoop::service::${name}::end":}
+  if ($daemon_cmd != undef) {
+    Anchor["hdp2-hadoop::service::${name}::begin"] -> Hdp2::Exec[$daemon_cmd] -> Anchor["hdp2-hadoop::service::${name}::end"]
+
+    if ($create_pid_dir == true) {
+      Anchor["hdp2-hadoop::service::${name}::begin"] -> Hdp2::Directory_recursive_create[$pid_dir] -> Hdp2::Exec[$daemon_cmd] 
+    }
+     if ($create_log_dir == true) {
+      Anchor["hdp2-hadoop::service::${name}::begin"] -> Hdp2::Directory_recursive_create[$log_dir] -> Hdp2::Exec[$daemon_cmd] 
+    }
+  }
+  if ($ensure == 'running') {
+    #TODO: look at Puppet resource retry and retry_sleep
+    #TODO: can make sleep contingent on $name
+    $sleep = 5
+    $post_check = "sleep ${sleep}; ${service_is_up}"
+    hdp2::exec { $post_check:
+      command => $post_check,
+      unless  => $service_is_up
+    }
+    Hdp2::Exec[$daemon_cmd] -> Hdp2::Exec[$post_check] -> Anchor["hdp2-hadoop::service::${name}::end"]
+  }  
+}
+
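
For the namenode case, for example, $daemon_cmd therefore expands to something
like the following (the sbin and conf paths come from hdp2::params and are
illustrative here):

    su - hdfs -c '/usr/lib/hadoop/sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode'

The exec is guarded by an unless test that the pid file exists and its process
is alive, and followed by a "sleep 5" re-check of the same condition.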

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/master-conn.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/master-conn.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/master-conn.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/master-conn.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::slave::master-conn($master_host)
+{
+  Hdp2-Hadoop::Configfile<||>{
+    namenode_host => $master_host,
+    yarn_rm_host   => $master_host
+  }
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/namenode-conn.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/namenode-conn.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/namenode-conn.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/namenode-conn.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,27 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#TODO: this might be replaced by just using hdp2::namenode-conn
+class hdp2-hadoop::slave::namenode-conn($namenode_host)
+{
+  #TODO: check if we can get rid of both
+  Hdp2-Hadoop::Configfile<||>{namenode_host => $namenode_host}
+  Hdp2::Configfile<||>{namenode_host => $namenode_host} #for components other than hadoop (e.g., hbase) 
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/resourcemanager-conn.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/resourcemanager-conn.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/resourcemanager-conn.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/slave/resourcemanager-conn.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,4 @@
+class hdp2-hadoop::slave::resourcemanager-conn($resourcemanager_host)
+{
+  Hdp2-Hadoop::Configfile<||>{yarn_rm_host => $resourcemanager_host}
+}

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/smoketest.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/smoketest.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/smoketest.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/smoketest.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,48 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::smoketest(
+  $opts={}
+)
+{
+  $mapred_examples_jar = $hdp2::params::mapred_examples_jar
+
+  #TODO: put in wait
+  #TODO: look for better way to compute outname
+  $date_format = '"%m%d%y"'
+  $outname = inline_template("<%=  `date +${date_format}`.chomp %>")
+
+  #TODO: hardwired to run on namenode and to use user hdfs
+
+  $put = "dfs -put /etc/passwd passwd-${outname}"
+  $exec = "jar ${mapred_examples_jar} wordcount passwd-${outname} ${outname}.out"
+  $result = "fs -test -e ${outname}.out >/dev/null 2>&1"
+  anchor{ "hdp2-hadoop::smoketest::begin" :} ->
+  hdp2-hadoop::exec-hadoop{ $put:
+    command => $put
+  } ->
+  hdp2-hadoop::exec-hadoop{ $exec:
+    command =>  $exec
+  } ->
+  hdp2-hadoop::exec-hadoop{ $result:
+    command =>  $result
+  } ->
+  anchor{ "hdp2-hadoop::smoketest::end" :}
+}
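
A rough plain-exec approximation of the first link in the chain above, assuming the exec-hadoop define (declared elsewhere in this module) ultimately shells out to the hadoop client as the smoke-test user; the user name and file names are illustrative:

    # Illustrative stand-in for hdp2-hadoop::exec-hadoop { $put: ... }.
    exec { 'smoketest-put':
      command => 'hadoop dfs -put /etc/passwd passwd-101512',
      path    => ['/usr/bin', '/bin'],
      user    => 'hdfs',
      unless  => 'hadoop fs -test -e passwd-101512',
    }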

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/snamenode.pp
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/snamenode.pp?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/snamenode.pp (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/manifests/snamenode.pp Mon Oct 15 05:55:46 2012
@@ -0,0 +1,77 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+class hdp2-hadoop::snamenode(
+  $service_state = $hdp2::params::cluster_service_state,
+  $opts = {}
+) inherits hdp2-hadoop::params  
+{
+  $hdp2::params::service_exists['hdp2-hadoop::snamenode'] = true
+
+  Hdp2-hadoop::Common<||>{service_states +> $service_state}
+  Hdp2-hadoop::Package<||>{include_64_bit => true}
+  Hdp2-hadoop::Configfile<||>{sizes +> 64}
+
+  if ($service_state == 'no_op') {
+  } elsif ($service_state in ['running','stopped','installed_and_configured','uninstalled']) {
+    $fs_checkpoint_dir = $hdp2-hadoop::params::fs_checkpoint_dir
+  
+    #adds package, users and directories, and common hadoop configs
+    include hdp2-hadoop::initialize
+ 
+    Hdp2-Hadoop::Configfile<||>{snamenode_host => $hdp2::params::host_address}
+  
+    hdp2-hadoop::snamenode::create_name_dirs { $fs_checkpoint_dir: 
+      service_state => $service_state
+    }
+    
+    if ($hdp2::params::service_exists['hdp2-hadoop::namenode'] == true) {
+      $create_pid_dir = false
+      $create_log_dir = false
+    } else {
+      $create_pid_dir = true
+      $create_log_dir = true
+    }
+    
+    hdp2-hadoop::service{ 'secondarynamenode':
+      ensure         => $service_state,
+      user           => $hdp2-hadoop::params::hdfs_user,
+      create_pid_dir => $create_pid_dir,
+      create_log_dir => $create_log_dir
+    }
+  
+    #top level does not need anchors
+    Class['hdp2-hadoop'] -> Hdp2-hadoop::Service['secondarynamenode']
+    Hdp2-hadoop::Snamenode::Create_name_dirs<||> -> Hdp2-hadoop::Service['secondarynamenode']
+  } else {
+    hdp_fail("TODO not implemented yet: service_state = ${service_state}")
+  }
+}
+
+define hdp2-hadoop::snamenode::create_name_dirs($service_state)
+{
+  $dirs = hdp_array_from_comma_list($name)
+  hdp2::directory_recursive_create { $dirs:
+    owner         => $hdp2-hadoop::params::hdfs_user,
+    mode          => '0755',
+    service_state => $service_state,
+    force         => true
+  }
+}
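
The create_name_dirs define expects $name to carry a comma-separated list of paths; a usage sketch with illustrative directories:

    # hdp_array_from_comma_list (the hdp2 helper used above) splits $name
    # into an array, so one declaration covers several checkpoint dirs.
    hdp2-hadoop::snamenode::create_name_dirs { '/hadoop/hdfs/namesecondary,/mnt/hadoop/hdfs/namesecondary':
      service_state => 'running'
    }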

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/capacity-scheduler.xml.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/capacity-scheduler.xml.erb?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/capacity-scheduler.xml.erb (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/capacity-scheduler.xml.erb Mon Oct 15 05:55:46 2012
@@ -0,0 +1,112 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.1</value>
+    <description>
+      Maximum percent of resources in the cluster that can be used to run
+      application masters, i.e., it controls the number of concurrently
+      running applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues then this includes their capacity as well.
+      The child queues' capacities should add up to their parent queue's
+      capacity or less.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue, i.e., change sub-queue
+      allocations.
+    </description>
+  </property>
+
+</configuration>

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/commons-logging.properties.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/commons-logging.properties.erb?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/commons-logging.properties.erb (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/commons-logging.properties.erb Mon Oct 15 05:55:46 2012
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/container-executor.cfg.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/container-executor.cfg.erb?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/container-executor.cfg.erb (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/container-executor.cfg.erb Mon Oct 15 05:55:46 2012
@@ -0,0 +1,4 @@
+yarn.nodemanager.local-dirs=<%=scope.function_hdp_template_var("yarn_nm_local_dirs")%>
+yarn.nodemanager.linux-container-executor.group=hadoop
+yarn.nodemanager.log-dirs=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/<%=scope.function_hdp_template_var("yarn_user")%>
+banned.users=hdfs,bin,0
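
For reference, one possible rendering of this template, with hypothetical values substituted for the hdp_template_var lookups (all paths are illustrative):

    yarn.nodemanager.local-dirs=/hadoop/yarn/local
    yarn.nodemanager.linux-container-executor.group=hadoop
    yarn.nodemanager.log-dirs=/var/log/hadoop/yarn
    banned.users=hdfs,bin,0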

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/core-site.xml.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/core-site.xml.erb?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/core-site.xml.erb (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/core-site.xml.erb Mon Oct 15 05:55:46 2012
@@ -0,0 +1,247 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value><%=scope.function_hdp_template_var("compression_codecs")%></value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+  <property>
+    <name>io.compression.codec.lzo.class</name>
+    <value>com.hadoop.compression.lzo.LzoCodec</value>
+    <description>The implementation for lzo codec.</description>
+  </property>
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.default.name</name>
+    <!-- cluster variant -->
+    <value>hdfs://<%=scope.function_hdp_host("namenode_host")%>:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.dir</name>
+    <value><%=scope.function_hdp_template_var("fs_checkpoint_dir")%></value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary images to merge.
+        If this is a comma-delimited list of directories then the image is
+        replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.edits.dir</name>
+    <value>${fs.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+        name node should store the temporary edits to merge.
+        If this is a comma-delimited list of directories then the edits are
+        replicated in all of the directories for redundancy.
+        The default value is the same as fs.checkpoint.dir.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>536870912</value>
+    <description>The size of the current edit log (in bytes) that triggers
+       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>webinterface.private.actions</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value><%=scope.function_hdp_template_var("security_type")%></value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value><%=scope.function_hdp_template_var("enable_security_authorization")%></value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([rn]m@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("yarn_user")%>/
+        RULE:[2:$1@$0]([nd]n@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hdfs_user")%>/
+        RULE:[2:$1@$0](hm@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
+        RULE:[2:$1@$0](rs@.*<%=scope.function_hdp_template_var("kerberos_domain")%>)s/.*/<%=scope.function_hdp_template_var("hbase_user")%>/
+        DEFAULT</value>
+<description>The mapping from kerberos principal names to local OS user names.
+  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translations rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("hcat_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("hcat_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("gateway_host")%></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+     Proxy group for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("oozie_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("oozie_server")%></value>
+  <description>
+     Proxy host for Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.groups</name>
+  <value><%=scope.function_hdp_template_var("proxyuser_group")%></value>
+  <description>
+    Proxy group for templeton.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.proxyuser.<%=scope.function_hdp_user("templeton_user")%>.hosts</name>
+  <value><%=scope.function_hdp_host("templeton_server_host")%></value>
+  <description>
+    Proxy host for templeton.
+  </description>
+</property>
+</configuration>
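
To make the hadoop.security.auth_to_local rule mechanics concrete: assuming a hypothetical kerberos_domain of EXAMPLE.COM and a yarn_user of yarn, the first rule above renders as

    RULE:[2:$1@$0]([rn]m@.*EXAMPLE.COM)s/.*/yarn/

so a two-component ResourceManager principal such as rm/host1@EXAMPLE.COM matches the filter and is mapped to the local user yarn.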

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-env.sh.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-env.sh.erb?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-env.sh.erb (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-env.sh.erb Mon Oct 15 05:55:46 2012
@@ -0,0 +1,95 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME=<%=scope.function_hdp_java_home()%>
+
+#TODO: we shouldn't need to define HADOOP_HOME and HADOOP_LIBEXEC_DIR!
+export HADOOP_HOME_WARN_SUPPRESS=1
+export HADOOP_HOME=${HADOOP_HOME:-<%=scope.function_hdp_template_var("::hdp2::params::hadoop_home")%>}
+export HADOOP_LIBEXEC_DIR=${HADOOP_HOME}/lib/hadoop/libexec
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+#export HADOOP_HEAPSIZE=
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms<%=scope.function_hdp_template_var("namenode_heapsize")%> -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+
+HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%> ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx128m ${HADOOP_CLIENT_OPTS}"
+#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData ${HADOOP_JAVA_PLATFORM_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=<%=scope.function_hdp_template_var("hdfs_user")%>
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER
+# TODO: temporary fix for mapred_log_dir. Scripts should be fixed
+export HADOOP_MAPRED_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$USER
+
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR=<%=scope.function_hdp_template_var("hadoop_logdirprefix")%>/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
+# TODO: temporary fix for mapred_pid_dir. Scripts should be fixed
+export HADOOP_MAPRED_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$USER
+export HADOOP_SECURE_DN_PID_DIR=<%=scope.function_hdp_template_var("hadoop_piddirprefix")%>/$HADOOP_SECURE_DN_USER
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10

Added: incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
URL: http://svn.apache.org/viewvc/incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb?rev=1398196&view=auto
==============================================================================
--- incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb (added)
+++ incubator/ambari/branches/branch-0.9-h2-dev/hmc/puppet/modules/hdp2-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb Mon Oct 15 05:55:46 2012
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8661
+datanode.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8662
+tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
+