You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-user@hadoop.apache.org by psterk <Pa...@Sun.COM> on 2009/03/17 00:51:32 UTC

Problem with com.sun.pinkdots.LogHandler

Hi,

I have been running a hadoop cluster successfully for a few months.  During
today's run, I am seeing a new error and it is not clear to me how to
resolve it. Below are the stack traces and the configuration file I am using.
Please share any tips you may have.

Thanks,
Paul

09/03/16 16:28:25 INFO mapred.JobClient: Task Id :
task_200903161455_0003_m_000127_0, Status : FAILED
java.lang.ArrayIndexOutOfBoundsException: 3
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
        at
org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)

task_200903161455_0003_m_000127_0: Starting
null.task_200903161455_0003_m_000127_0
task_200903161455_0003_m_000127_0: Closing
task_200903161455_0003_m_000127_0: log4j:WARN No appenders could be found
for logger (org.apache.hadoop.mapred.TaskRunner).
task_200903161455_0003_m_000127_0: log4j:WARN Please initialize the log4j
system properly.
09/03/16 16:28:27 INFO mapred.JobClient: Task Id :
task_200903161455_0003_m_000128_0, Status : FAILED
java.lang.ArrayIndexOutOfBoundsException: 3
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
        at
org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)

task_200903161455_0003_m_000128_0: Starting
null.task_200903161455_0003_m_000128_0
task_200903161455_0003_m_000128_0: Closing
09/03/16 16:28:32 INFO mapred.JobClient: Task Id :
task_200903161455_0003_m_000128_1, Status : FAILED
java.lang.ArrayIndexOutOfBoundsException: 3
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
        at
org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)

task_200903161455_0003_m_000128_1: Starting
null.task_200903161455_0003_m_000128_1
task_200903161455_0003_m_000128_1: Closing
09/03/16 16:28:37 INFO mapred.JobClient: Task Id :
task_200903161455_0003_m_000127_1, Status : FAILED
java.lang.ArrayIndexOutOfBoundsException: 3
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
        at
org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)

task_200903161455_0003_m_000127_1: Starting
null.task_200903161455_0003_m_000127_1
task_200903161455_0003_m_000127_1: Closing
task_200903161455_0003_m_000127_1: log4j:WARN No appenders could be found
for logger (org.apache.hadoop.ipc.Client).
task_200903161455_0003_m_000127_1: log4j:WARN Please initialize the log4j
system properly.
09/03/16 16:28:40 INFO mapred.JobClient: Task Id :
task_200903161455_0003_m_000128_2, Status : FAILED
java.lang.ArrayIndexOutOfBoundsException: 3
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
        at
org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)

task_200903161455_0003_m_000128_2: Starting
null.task_200903161455_0003_m_000128_2
task_200903161455_0003_m_000128_2: Closing
09/03/16 16:28:46 INFO mapred.JobClient:  map 100% reduce 100%
java.io.IOException: Job failed!
        at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1062)
        at com.sun.pinkdots.Main.handleLogs(Main.java:63)
        at com.sun.pinkdots.Main.main(Main.java:35)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
        at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
        at java.lang.reflect.Method.invoke(Method.java:597)
        at org.apache.hadoop.util.RunJar.main(RunJar.java:155)
        at org.apache.hadoop.mapred.JobShell.run(JobShell.java:194)
        at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
        at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79)
        at org.apache.hadoop.mapred.JobShell.main(JobShell.java:220)


#
# First, setup the necessary filesystem locations
#
${HADOOP}/bin/hadoop dfs -rmr hdfs:///user/${USER}/pinkdots
${HADOOP}/bin/hadoop dfs -copyFromLocal \
  file://${HOME}/pinkdots/config/glassfish_admin.xml \
  hdfs:///user/${USER}/pinkdots/config/glassfish_admin.xml
${HADOOP}/bin/hadoop dfs -copyFromLocal \
  file://${HOME}/pinkdots/data/combined/hits_all.txt \
  hdfs:///user/${USER}/pinkdots/data/combined/hits_all.txt
${HADOOP}/bin/hadoop dfs -mkdir
hdfs:///user/${USER}/pinkdots/work/tile_handler

#
# Then run the actual job
#
${HADOOP}/bin/hadoop --config ${HADOOP}/conf \
  jar ${HOME}/pinkdots/dist/pinkdots.jar \
  com.sun.pinkdots.Main \
  hdfs:///user/${USER}/pinkdots/config/glassfish_admin.xml \
  hdfs:///user/${USER}/pinkdots/data/combined \
  hdfs:///user/${USER}/pinkdots/work/log_handler \
  hdfs:///user/${USER}/pinkdots/work/tile_handler \
  file://${HOME}/pinkdots/tiles.zip


-- 
View this message in context: http://www.nabble.com/Problem-with-com.sun.pinkdots.LogHandler-tp22550022p22550022.html
Sent from the Hadoop core-user mailing list archive at Nabble.com.


Re: Problem with com.sun.pinkdots.LogHandler

Posted by psterk <Pa...@Sun.COM>.
Thanks for the hint! The problem appears to be a corrupted input file. No
hadoop issues.

Paul


Hi Paul,

Looking at the stack trace, the exception is being thrown from your
map method. Can you put some debugging in there to diagnose it?
Detecting and logging the size of the array and the index you are
trying to access should help. You can write to standard error and look
in the task logs. Another way is to use Reporter's setStatus() method
as a quick way to see messages in the web UI.

Cheers,
Tom

On Mon, Mar 16, 2009 at 11:51 PM, psterk <Pa...@sun.com> wrote:
>
> Hi,
>
> I have been running a hadoop cluster successfully for a few months.
>  During
> today's run, I am seeing a new error and it is not clear to me how to
> resolve it. Below are the stack traces and the configuration file I am using.
> Please share any tips you may have.
>
> Thanks,
> Paul
>
> 09/03/16 16:28:25 INFO mapred.JobClient: Task Id :
> task_200903161455_0003_m_000127_0, Status : FAILED
> java.lang.ArrayIndexOutOfBoundsException: 3
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
>        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
>        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
>        at
> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)
>

-- 
View this message in context: http://www.nabble.com/Problem-with-com.sun.pinkdots.LogHandler-tp22550022p22568374.html
Sent from the Hadoop core-user mailing list archive at Nabble.com.


Re: Problem with com.sun.pinkdots.LogHandler

Posted by Tom White <to...@cloudera.com>.
Hi Paul,

Looking at the stack trace, the exception is being thrown from your
map method. Can you put some debugging in there to diagnose it?
Detecting and logging the size of the array and the index you are
trying to access should help. You can write to standard error and look
in the task logs. Another way is to use Reporter's setStatus() method
as a quick way to see messages in the web UI.

Cheers,
Tom

On Mon, Mar 16, 2009 at 11:51 PM, psterk <Pa...@sun.com> wrote:
>
> Hi,
>
> I have been running a hadoop cluster successfully for a few months.  During
> today's run, I am seeing a new error and it is not clear to me how to
> resolve it. Below are the stack traces and the configuration file I am using.
> Please share any tips you may have.
>
> Thanks,
> Paul
>
> 09/03/16 16:28:25 INFO mapred.JobClient: Task Id :
> task_200903161455_0003_m_000127_0, Status : FAILED
> java.lang.ArrayIndexOutOfBoundsException: 3
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
>        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
>        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
>        at
> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)
>
> task_200903161455_0003_m_000127_0: Starting
> null.task_200903161455_0003_m_000127_0
> task_200903161455_0003_m_000127_0: Closing
> task_200903161455_0003_m_000127_0: log4j:WARN No appenders could be found
> for logger (org.apache.hadoop.mapred.TaskRunner).
> task_200903161455_0003_m_000127_0: log4j:WARN Please initialize the log4j
> system properly.
> 09/03/16 16:28:27 INFO mapred.JobClient: Task Id :
> task_200903161455_0003_m_000128_0, Status : FAILED
> java.lang.ArrayIndexOutOfBoundsException: 3
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
>        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
>        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
>        at
> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)
>
> task_200903161455_0003_m_000128_0: Starting
> null.task_200903161455_0003_m_000128_0
> task_200903161455_0003_m_000128_0: Closing
> 09/03/16 16:28:32 INFO mapred.JobClient: Task Id :
> task_200903161455_0003_m_000128_1, Status : FAILED
> java.lang.ArrayIndexOutOfBoundsException: 3
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
>        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
>        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
>        at
> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)
>
> task_200903161455_0003_m_000128_1: Starting
> null.task_200903161455_0003_m_000128_1
> task_200903161455_0003_m_000128_1: Closing
> 09/03/16 16:28:37 INFO mapred.JobClient: Task Id :
> task_200903161455_0003_m_000127_1, Status : FAILED
> java.lang.ArrayIndexOutOfBoundsException: 3
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
>        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
>        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
>        at
> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)
>
> task_200903161455_0003_m_000127_1: Starting
> null.task_200903161455_0003_m_000127_1
> task_200903161455_0003_m_000127_1: Closing
> task_200903161455_0003_m_000127_1: log4j:WARN No appenders could be found
> for logger (org.apache.hadoop.ipc.Client).
> task_200903161455_0003_m_000127_1: log4j:WARN Please initialize the log4j
> system properly.
> 09/03/16 16:28:40 INFO mapred.JobClient: Task Id :
> task_200903161455_0003_m_000128_2, Status : FAILED
> java.lang.ArrayIndexOutOfBoundsException: 3
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:71)
>        at com.sun.pinkdots.LogHandler$Mapper.map(LogHandler.java:22)
>        at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:47)
>        at org.apache.hadoop.mapred.MapTask.run(MapTask.java:219)
>        at
> org.apache.hadoop.mapred.TaskTracker$Child.main(TaskTracker.java:2124)
>
> task_200903161455_0003_m_000128_2: Starting
> null.task_200903161455_0003_m_000128_2
> task_200903161455_0003_m_000128_2: Closing
> 09/03/16 16:28:46 INFO mapred.JobClient:  map 100% reduce 100%
> java.io.IOException: Job failed!
>        at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1062)
>        at com.sun.pinkdots.Main.handleLogs(Main.java:63)
>        at com.sun.pinkdots.Main.main(Main.java:35)
>        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>        at
> sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
>        at
> sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
>        at java.lang.reflect.Method.invoke(Method.java:597)
>        at org.apache.hadoop.util.RunJar.main(RunJar.java:155)
>        at org.apache.hadoop.mapred.JobShell.run(JobShell.java:194)
>        at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
>        at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79)
>        at org.apache.hadoop.mapred.JobShell.main(JobShell.java:220)
>
>
> #
> # First, setup the necessary filesystem locations
> #
> ${HADOOP}/bin/hadoop dfs -rmr hdfs:///user/${USER}/pinkdots
> ${HADOOP}/bin/hadoop dfs -copyFromLocal \
>  file://${HOME}/pinkdots/config/glassfish_admin.xml \
>  hdfs:///user/${USER}/pinkdots/config/glassfish_admin.xml
> ${HADOOP}/bin/hadoop dfs -copyFromLocal \
>  file://${HOME}/pinkdots/data/combined/hits_all.txt \
>  hdfs:///user/${USER}/pinkdots/data/combined/hits_all.txt
> ${HADOOP}/bin/hadoop dfs -mkdir
> hdfs:///user/${USER}/pinkdots/work/tile_handler
>
> #
> # Then run the actual job
> #
> ${HADOOP}/bin/hadoop --config ${HADOOP}/conf \
>  jar ${HOME}/pinkdots/dist/pinkdots.jar \
>  com.sun.pinkdots.Main \
>  hdfs:///user/${USER}/pinkdots/config/glassfish_admin.xml \
>  hdfs:///user/${USER}/pinkdots/data/combined \
>  hdfs:///user/${USER}/pinkdots/work/log_handler \
>  hdfs:///user/${USER}/pinkdots/work/tile_handler \
>  file://${HOME}/pinkdots/tiles.zip
>
>
> --
> View this message in context: http://www.nabble.com/Problem-with-com.sun.pinkdots.LogHandler-tp22550022p22550022.html
> Sent from the Hadoop core-user mailing list archive at Nabble.com.
>
>