Posted to mapreduce-user@hadoop.apache.org by Senthil Sekar <se...@gmail.com> on 2014/03/01 15:11:03 UTC
Problem in Submitting a Map-Reduce Job to Remote Hadoop Cluster
Hi,
I have a remote server (CentOS 6.3) with CDH 4.0.1 installed.
I also have a Windows 7 machine from which I am trying to submit a simple
WordCount MapReduce job (I have included the Hadoop 2.0.0 lib jars in my
Eclipse environment).
I am getting the exception below when I try to run the job from Eclipse on
the Windows 7 machine:
//-------------------
Exception in thread "main" java.io.IOException: Cannot initialize Cluster. Please check your configuration for mapreduce.framework.name and the correspond server addresses.
    at org.apache.hadoop.mapreduce.Cluster.initialize(Cluster.java:121)
    at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:83)
    at org.apache.hadoop.mapreduce.Cluster.<init>(Cluster.java:76)
    at org.apache.hadoop.mapred.JobClient.init(JobClient.java:487)
    at org.apache.hadoop.mapred.JobClient.<init>(JobClient.java:466)
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:879)
    at com.pss.WordCount.main(WordCount.java:79)
//---------------------
Please find the code below
//-----------------------------------------------------
package com.pss;

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class WordCount {

    public static class Map extends MapReduceBase
            implements Mapper<LongWritable, Text, Text, IntWritable> {

        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        @Override
        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            // Emit (token, 1) for every whitespace-separated token in the line.
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

    public static class Reduce extends MapReduceBase
            implements Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            // Sum the per-token counts emitted by the mappers.
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration config = new Configuration();
        config.set("fs.default.name", "hdfs://xyz-hostname:9000");
        config.set("mapred.job.tracker", "xyz-hostname:9001");

        JobConf conf = new JobConf(config);
        conf.setJarByClass(WordCount.class);
        //conf.setJar(jar);
        conf.setJobName("WordCount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        //conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}
//---------------------------------------------------------------------------------------------
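For reference, args[0] and args[1] are the job's input and output paths on HDFS (FileOutputFormat fails fast if the output directory already exists), so an Eclipse run configuration would pass two arguments such as the hypothetical paths below.
//-------------------
// Hypothetical program arguments, resolved against fs.default.name:
//   /user/senthil/input /user/senthil/output
//-------------------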
Please help me to resolve this issue.
Regards,
Senthil
RE: Problem in Submitting a Map-Reduce Job to Remote Hadoop Cluster
Posted by Rohith Sharma K S <ro...@huawei.com>.
One more configuration needs to be added:
config.set("mapreduce.framework.name", "yarn");
Thanks
Rohith
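Combined with the HDFS settings from the original post, a minimal client-side setup for a YARN cluster could look like the sketch below. The hostname is a placeholder, 8032 is the stock Hadoop 2 ResourceManager port (CDH may map it differently), and fs.defaultFS is the Hadoop 2 replacement for the deprecated fs.default.name.
//-------------------
// Minimal sketch; placeholder hostname, default ResourceManager port assumed.
Configuration config = new Configuration();
// NameNode address; fs.defaultFS supersedes the deprecated fs.default.name.
config.set("fs.defaultFS", "hdfs://xyz-hostname:9000");
// Route submission through YARN; without this the client cannot pick a
// ClientProtocolProvider and fails with "Cannot initialize Cluster".
config.set("mapreduce.framework.name", "yarn");
// ResourceManager address; 8032 is the stock Hadoop 2 default port.
config.set("yarn.resourcemanager.address", "xyz-hostname:8032");
JobConf conf = new JobConf(config);
//-------------------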
RE: Problem in Submitting a Map-Reduce Job to Remote Hadoop Cluster
Posted by Rohith Sharma K S <ro...@huawei.com>.
Hi,
Set the configuration below in your WordCount job.
Configuration config = new Configuration();
config.set("fs.default.name", "hdfs://xyz-hostname:9000");
config.set("mapred.job.tracker", "xyz-hostname:9001");
config.set("yarn.application.classpath", "$HADOOP_CONF_DIR, $HADOOP_COMMON_HOME/share/hadoop/common/*, $HADOOP_COMMON_HOME/share/hadoop/common/lib/*, $HADOOP_HDFS_HOME/share/hadoop/hdfs/*, $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*, $YARN_HOME/share/hadoop/mapreduce/*, $YARN_HOME/share/hadoop/mapreduce/lib/*, $YARN_HOME/share/hadoop/yarn/*, $YARN_HOME/share/hadoop/yarn/lib/*");
Or equivalently, in the cluster's yarn-site.xml:
<property>
  <name>yarn.application.classpath</name>
  <value>$HADOOP_CONF_DIR,
    $HADOOP_COMMON_HOME/share/hadoop/common/*,
    $HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
    $HADOOP_HDFS_HOME/share/hadoop/hdfs/*,
    $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
    $YARN_HOME/share/hadoop/mapreduce/*,
    $YARN_HOME/share/hadoop/mapreduce/lib/*,
    $YARN_HOME/share/hadoop/yarn/*,
    $YARN_HOME/share/hadoop/yarn/lib/*</value>
</property>
Thanks & Regards
Rohith Sharma K S
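As an alternative to hardcoding these properties, the cluster's configuration files can be copied to the client machine and loaded with Configuration.addResource; a sketch, where C:/hadoop-conf is a hypothetical local copy of the cluster's configuration directory:
//-------------------
// Load the remote cluster's *-site.xml files instead of hardcoding values.
Configuration config = new Configuration();
config.addResource(new Path("C:/hadoop-conf/core-site.xml"));
config.addResource(new Path("C:/hadoop-conf/hdfs-site.xml"));
config.addResource(new Path("C:/hadoop-conf/mapred-site.xml"));
config.addResource(new Path("C:/hadoop-conf/yarn-site.xml"));
JobConf conf = new JobConf(config);
//-------------------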