Posted to commits@bigtop.apache.org by rv...@apache.org on 2013/02/10 07:04:01 UTC
[9/50] [abbrv] git commit: BIGTOP-797. provide a way to 'rsync' files into HDFS during puppet deployment
BIGTOP-797. provide a way to 'rsync' files into HDFS during puppet deployment
Project: http://git-wip-us.apache.org/repos/asf/bigtop/repo
Commit: http://git-wip-us.apache.org/repos/asf/bigtop/commit/aa7ba339
Tree: http://git-wip-us.apache.org/repos/asf/bigtop/tree/aa7ba339
Diff: http://git-wip-us.apache.org/repos/asf/bigtop/diff/aa7ba339
Branch: refs/heads/RCs
Commit: aa7ba339bbfb5b9b58f5a3322613b0a14bec13fc
Parents: 9478d0e
Author: Roman Shaposhnik <rv...@cloudera.com>
Authored: Wed Nov 28 17:26:56 2012 -0800
Committer: Roman Shaposhnik <rv...@cloudera.com>
Committed: Fri Nov 30 16:28:10 2012 -0800
----------------------------------------------------------------------
bigtop-deploy/puppet/manifests/cluster.pp | 38 +++++++++++---
.../puppet/modules/hadoop-sqoop/manifests/init.pp | 8 ---
.../puppet/modules/hadoop/manifests/init.pp | 19 +++++++-
3 files changed, 47 insertions(+), 18 deletions(-)
----------------------------------------------------------------------
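In HDFS terms, the "rsync" this commit provides is approximated by an idempotent mkdir followed by a forced put: files already present at the destination are overwritten rather than skipped. A minimal sketch of the effective shell commands, using one of the share/lib paths from the diff below for illustration:

    # create the target directory if it does not exist yet
    hadoop fs -mkdir -p /user/oozie/share/lib/pig
    # -put -f overwrites files already present, giving rsync-like sync semantics
    hadoop fs -put -f /usr/lib/pig/{lib/,}*.jar /user/oozie/share/lib/pig
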
http://git-wip-us.apache.org/repos/asf/bigtop/blob/aa7ba339/bigtop-deploy/puppet/manifests/cluster.pp
----------------------------------------------------------------------
diff --git a/bigtop-deploy/puppet/manifests/cluster.pp b/bigtop-deploy/puppet/manifests/cluster.pp
index a948f5e..b4cb9f1 100644
--- a/bigtop-deploy/puppet/manifests/cluster.pp
+++ b/bigtop-deploy/puppet/manifests/cluster.pp
@@ -205,6 +205,7 @@ class hadoop_head_node inherits hadoop_cluster_node {
default_fs => $hadoop_namenode_uri,
kerberos_realm => $kerberos_realm,
}
+ Hadoop::Httpfs<||> -> Hue::Server<||>
hadoop-zookeeper::server { "zookeeper":
myid => "0",
@@ -212,20 +213,41 @@ class hadoop_head_node inherits hadoop_cluster_node {
kerberos_realm => $kerberos_realm,
}
- hadoop::create_hdfs_dirs { [ "/mapred", "/tmp", "/system", "/user", "/hbase", "/benchmarks", "/user/jenkins", "/user/hive", "/user/root", "/user/history", "/user/hue" ]:
+ hadoop::create_hdfs_dirs { [ "/tmp", "/var", "/var/log", "/hbase", "/benchmarks", "/user", "/user/history", "/user/jenkins", "/user/hive", "/user/root", "/user/hue", "/user/oozie" ]:
auth => $hadoop_security_authentication,
- hdfs_dirs_meta => { "/tmp" => { perm => "777", user => "hdfs" },
- "/mapred" => { perm => "755", user => "mapred" },
- "/system" => { perm => "755", user => "hdfs" },
- "/user" => { perm => "755", user => "hdfs" },
+ hdfs_dirs_meta => { "/tmp" => { perm =>"1777", user => "hdfs" },
+ "/var" => { perm => "755", user => "hdfs" },
+ "/var/log" => { perm =>"1775", user => "yarn:mapred" },
"/hbase" => { perm => "755", user => "hbase" },
"/benchmarks" => { perm => "777", user => "hdfs" },
+ "/user" => { perm => "755", user => "hdfs" },
+ "/user/history" => { perm => "775", user => "mapred" },
"/user/jenkins" => { perm => "777", user => "jenkins"},
- "/user/history" => { perm => "777", user => "mapred" },
- "/user/root" => { perm => "777", user => "root" },
"/user/hive" => { perm => "777", user => "hive" },
- "/user/hue" => { perm => "777", user => "hue" }},
+ "/user/root" => { perm => "777", user => "root" },
+ "/user/hue" => { perm => "777", user => "hue" },
+ "/user/oozie" => { perm => "777", user => "oozie" },
+ },
+ }
+ Hadoop::Create_hdfs_dirs<||> -> Hadoop-hbase::Master<||>
+ Hadoop::Create_hdfs_dirs<||> -> Hadoop::Resourcemanager<||>
+ Hadoop::Create_hdfs_dirs<||> -> Hadoop::Historyserver<||>
+ Hadoop::Create_hdfs_dirs<||> -> Hadoop::Httpfs<||>
+ Hadoop::Create_hdfs_dirs<||> -> Hadoop::Rsync_hdfs<||>
+
+ hadoop::rsync_hdfs { [ "/user/oozie/share/lib/hive",
+ "/user/oozie/share/lib/mapreduce-streaming",
+ "/user/oozie/share/lib/distcp",
+ "/user/oozie/share/lib/pig",
+ "/user/oozie/share/lib/sqoop" ]:
+ auth => $hadoop_security_authentication,
+ files => { "/user/oozie/share/lib/hive" => "/usr/lib/hive/lib/*.jar",
+ "/user/oozie/share/lib/mapreduce-streaming" => "/usr/lib/hadoop-mapreduce/hadoop-streaming*.jar",
+ "/user/oozie/share/lib/distcp" => "/usr/lib/hadoop-mapreduce/hadoop-distcp*.jar",
+ "/user/oozie/share/lib/pig" => "/usr/lib/pig/{lib/,}*.jar",
+ "/user/oozie/share/lib/sqoop" => "/usr/lib/sqoop/{lib/,}*.jar" },
}
+ Hadoop::Rsync_hdfs<||> -> Hadoop-oozie::Server<||>
solr::server { "solrcloud server":
collections => $solrcloud_collections,
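
A few notes on the cluster.pp changes above. The reworked hadoop::create_hdfs_dirs list switches /tmp and /var/log to sticky-bit modes (1777 and 1775), so users can only delete their own files there, and uses the user:group form ("yarn:mapred") for /var/log ownership. The Hadoop::Create_hdfs_dirs<||> -> ... lines are Puppet resource-collector chains: they order every create_hdfs_dirs instance before the HBase master, resourcemanager, historyserver, httpfs, and the new rsync_hdfs resources, so the directories exist before the daemons that depend on them start. As a sketch, the equivalent one-off shell commands for the two sticky-bit entries (assuming a running namenode and an hdfs superuser shell) would be:

    # sticky bit on /tmp: world-writable, but only owners may delete their files
    hadoop fs -chmod 1777 /tmp
    # /var/log owned by yarn, group mapred, group-writable, sticky bit set
    hadoop fs -chown yarn:mapred /var/log
    hadoop fs -chmod 1775 /var/log
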
http://git-wip-us.apache.org/repos/asf/bigtop/blob/aa7ba339/bigtop-deploy/puppet/modules/hadoop-sqoop/manifests/init.pp
----------------------------------------------------------------------
diff --git a/bigtop-deploy/puppet/modules/hadoop-sqoop/manifests/init.pp b/bigtop-deploy/puppet/modules/hadoop-sqoop/manifests/init.pp
index 35edac7..be69572 100644
--- a/bigtop-deploy/puppet/modules/hadoop-sqoop/manifests/init.pp
+++ b/bigtop-deploy/puppet/modules/hadoop-sqoop/manifests/init.pp
@@ -18,14 +18,6 @@ class hadoop-sqoop {
package { "sqoop":
ensure => latest,
}
-
- # FIXME: this is NOT supposed to be needed
- # but for now that's the easiest way to add
- # extra jars to sqoop classpath
- file { "/usr/lib/sqoop/lib/mysql-connector-java-5.1.12.jar":
- ensure => "/usr/share/java/mysql-connector-java-5.1.12.jar",
- require => Package["sqoop"],
- }
}
define metastore {
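
The removed file resource above had worked around a classpath gap by symlinking the MySQL JDBC driver into Sqoop's lib directory; in shell terms it amounted to the following, with the version-pinned path exactly as in the removed code:

    # what the deleted puppet file resource effectively did
    ln -s /usr/share/java/mysql-connector-java-5.1.12.jar \
          /usr/lib/sqoop/lib/mysql-connector-java-5.1.12.jar

Dropping it removes the hard-coded driver version from the deployment.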
http://git-wip-us.apache.org/repos/asf/bigtop/blob/aa7ba339/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
----------------------------------------------------------------------
diff --git a/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp b/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
index c907851..496c303 100644
--- a/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
+++ b/bigtop-deploy/puppet/modules/hadoop/manifests/init.pp
@@ -227,13 +227,28 @@ class hadoop {
exec { "HDFS init $title":
user => "hdfs",
- command => "/bin/bash -c 'hadoop fs -mkdir $title && hadoop fs -chmod $perm $title && hadoop fs -chown $user $title'",
- unless => "/bin/bash -c 'hadoop fs -ls $name >/dev/null 2>&1'",
+ command => "/bin/bash -c 'hadoop fs -mkdir $title ; hadoop fs -chmod $perm $title && hadoop fs -chown $user $title'",
require => Service["hadoop-hdfs-namenode"],
}
Exec <| title == "activate nn1" |> -> Exec["HDFS init $title"]
}
+ define rsync_hdfs($files, $auth="simple") {
+ $src = $files[$title]
+
+ if ($auth == "kerberos") {
+ require hadoop::kinit
+ Exec["HDFS kinit"] -> Exec["HDFS init $title"]
+ }
+
+ exec { "HDFS rsync $title":
+ user => "hdfs",
+ command => "/bin/bash -c 'hadoop fs -mkdir -p $title ; hadoop fs -put -f $src $title'",
+ require => Service["hadoop-hdfs-namenode"],
+ }
+ Exec <| title == "activate nn1" |> -> Exec["HDFS rsync $title"]
+ }
+
define namenode ($host = $fqdn , $port = "8020", $thrift_port= "10090", $auth = "simple", $dirs = ["/tmp/nn"], $ha = 'disabled', $zk = '') {
$first_namenode = inline_template("<%= Array(host)[0] %>")
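
Two details of the hadoop/init.pp hunk are worth spelling out. First, in "HDFS init $title" the leading mkdir is now joined with ';' instead of '&&', and the unless guard is gone: when the directory already exists, mkdir fails but chmod and chown still run, so repeated puppet runs converge permissions instead of skipping the resource. Second, the new rsync_hdfs define looks up its source glob in the $files hash by resource title and pushes it with put -f, which overwrites stale copies. One apparent slip: the kerberos branch orders Exec["HDFS kinit"] before Exec["HDFS init $title"], which looks carried over from create_hdfs_dirs; the exec actually declared in this define is titled "HDFS rsync $title". A sketch of what one rsync_hdfs instance expands to, using the sqoop entry from cluster.pp above:

    # $title = /user/oozie/share/lib/sqoop, $src = $files[$title]
    # ';' keeps going when the directory already exists
    hadoop fs -mkdir -p /user/oozie/share/lib/sqoop ; \
      hadoop fs -put -f /usr/lib/sqoop/{lib/,}*.jar /user/oozie/share/lib/sqoop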