Posted to hdfs-user@hadoop.apache.org by unmesha sreeveni <un...@gmail.com> on 2013/12/20 05:17:08 UTC

LOGGING in MapReduce

I want to see my System.out.println() output in the console.
How do I do that?
I tried the code below, but it is not displaying anything. I am using the old
mapred API.
Did I do anything wrong?

Code
------------------------------------------------------------

package tech;

import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class Reduce extends MapReduceBase
    implements Reducer<Text, IntWritable, Text, IntWritable> {

  static int cnt = 0;
  ArrayList<String> ar = new ArrayList<String>();
  String data = null;

  // Logger from the Commons Logging library bundled with Hadoop.
  public static final Log LOG = LogFactory.getLog(Reduce.class);

  public void reduce(Text key, Iterator<IntWritable> values,
      OutputCollector<Text, IntWritable> output,
      Reporter reporter) throws IOException {

    System.out.println("In reducer");
    LOG.info("In Reducer");

    // Sum all counts for this key.
    int sum = 0;
    String line = key.toString();
    StringTokenizer itr = new StringTokenizer(line);
    while (values.hasNext()) {
      sum += values.next().get();
    }
    output.collect(key, new IntWritable(sum));

    // Collect "key sum" lines and flush them to an intermediate HDFS file.
    String data = key + " " + sum;
    ar.add(data);
    writeToFile(ar);
    System.out.println("Wrote to file");
    if (LOG.isDebugEnabled()) {
      LOG.debug("Log: In Reducer");
    }

    ar.add("\n");
    int index = Integer.parseInt(itr.nextToken());
    String value = itr.nextToken();
    String classLabel = itr.nextToken();
    int count = sum;
  }

  public static void writeToFile(ArrayList<String> text) throws IOException {
    System.out.println("In reduce write to file ");
    C45 id = new C45();   // C45 is another class in my project
    System.out.println("count " + cnt);

    Path input = new Path("C45/intermediate" + id.current_index + ".txt");
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    BufferedWriter bw = new BufferedWriter(
        new OutputStreamWriter(fs.create(input, true)));
    for (String str : text) {
      bw.write(str);
    }
    bw.newLine();
    bw.close();
  }
}

1. And how do I log whether my reducer data actually goes into the
writeToFile() function and creates the file in HDFS?

-- 
Thanks & Regards

Unmesha Sreeveni U.B

Junior Developer

Re: LOGGING in MapReduce

Posted by unmesha sreeveni <un...@gmail.com>.
Thanks for your reply, I got it :)


On Sat, Dec 21, 2013 at 1:53 AM, Jiayu Ji <ji...@gmail.com> wrote:

> Go to the JobTracker UI and find the corresponding job ID. You should see
> the output there in the mapper or reducer logs.
>
>
> On Fri, Dec 20, 2013 at 2:04 AM, Tao Xiao <xi...@gmail.com> wrote:
>
>> You can't see your mappers' or reducers' System.out.println() output in
>> the console: the console only submits the job, while the map and reduce
>> tasks run in separate processes, often on different nodes, which are not
>> attached to your console.
>>
>> But you can find the output produced by your mappers' and reducers'
>> System.out.println() calls in the corresponding task log files.
>>
>
>
> --
> Jiayu (James) Ji,
>
> Cell: (312)823-7393
>
>


-- 
Thanks & Regards

Unmesha Sreeveni U.B

Junior Developer

Re: LOGGING in MapReduce

Posted by Jiayu Ji <ji...@gmail.com>.
Go to the JobTracker UI and find the corresponding job ID. You should see
the output there in the mapper or reducer logs.
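
Besides reading the task logs there, the old mapred API's Reporter (the
reporter argument your reduce() already receives) can push small bits of
information straight into that UI. This is only a minimal sketch, not your
exact code: the class name CountingReduce and the counter names "MyApp" /
"reduce calls" are made up for illustration.

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class CountingReduce extends MapReduceBase
    implements Reducer<Text, IntWritable, Text, IntWritable> {

  public void reduce(Text key, Iterator<IntWritable> values,
      OutputCollector<Text, IntWritable> output,
      Reporter reporter) throws IOException {

    // Counters are aggregated per job and shown on the job's page in the UI.
    reporter.incrCounter("MyApp", "reduce calls", 1);

    int sum = 0;
    while (values.hasNext()) {
      sum += values.next().get();
    }
    output.collect(key, new IntWritable(sum));

    // The status string appears next to the running task attempt in the UI.
    reporter.setStatus("last key: " + key.toString());
  }
}

Counters remain visible after the job finishes, so they are also handy for
checks like "how many times did writeToFile() run".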


On Fri, Dec 20, 2013 at 2:04 AM, Tao Xiao <xi...@gmail.com> wrote:

> You can't see your mappers' or reducers' System.out.println() output in
> the console: the console only submits the job, while the map and reduce
> tasks run in separate processes, often on different nodes, which are not
> attached to your console.
>
> But you can find the output produced by your mappers' and reducers'
> System.out.println() calls in the corresponding task log files.
>


-- 
Jiayu (James) Ji,

Cell: (312)823-7393

Re: LOGGING in MapReduce

Posted by Tao Xiao <xi...@gmail.com>.
You can't see your mappers' or reducers' System.out.println() output in the
console: the console only submits the job, while the map and reduce tasks run
in separate processes, often on different nodes, which are not attached to
your console.

But you can find the output produced by your mappers' and reducers'
System.out.println() calls in the corresponding task log files.
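
The same applies to the commons-logging output (the LOG calls in your code):
it goes to the task logs, not the console. Below is a minimal sketch, assuming
the old mapred API and the Commons Logging classes that Hadoop itself is built
on; the class name LoggingReduce is made up, and the comments describe where
each kind of line typically ends up on a Hadoop 1.x-style cluster.

import java.io.IOException;
import java.util.Iterator;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class LoggingReduce extends MapReduceBase
    implements Reducer<Text, IntWritable, Text, IntWritable> {

  private static final Log LOG = LogFactory.getLog(LoggingReduce.class);

  public void reduce(Text key, Iterator<IntWritable> values,
      OutputCollector<Text, IntWritable> output,
      Reporter reporter) throws IOException {

    // Typically captured in the task attempt's "stdout" log file,
    // not in the console where the job was submitted.
    System.out.println("reducing key " + key);

    // Typically routed through log4j to the task attempt's "syslog" log
    // file, also viewable from the task page of the web UI.
    LOG.info("reducing key " + key);

    int sum = 0;
    while (values.hasNext()) {
      sum += values.next().get();
    }
    output.collect(key, new IntWritable(sum));
  }
}

On the worker nodes those files usually sit under the Hadoop log directory,
e.g. logs/userlogs/<job-id>/<attempt-id>/{stdout,stderr,syslog}, though the
exact layout depends on the Hadoop version.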

