Posted to mapreduce-dev@hadoop.apache.org by "Jason Lowe (JIRA)" <ji...@apache.org> on 2013/12/04 15:40:37 UTC

[jira] [Resolved] (MAPREDUCE-5668) Exception in thread "main" java.lang.IncompatibleClassChangeError: Found class org.apache.hadoop.mapreduce.JobContext, but interface was expected

     [ https://issues.apache.org/jira/browse/MAPREDUCE-5668?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Jason Lowe resolved MAPREDUCE-5668.
-----------------------------------

    Resolution: Invalid

Closing this as invalid based on the evidence from MAPREDUCE-5667. The code is being compiled against a later Hadoop release but then run on an earlier one. The code should be compiled against the Hadoop release being used (or an earlier release), keeping in mind the guidelines in the binary compatibility document.
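For reference, Hadoop 1.x ships org.apache.hadoop.mapreduce.JobContext as a class while Hadoop 2.x ships it as an interface, which is exactly the mismatch this error reports. A quick way to confirm which line the runtime classpath is from is a small sketch like the one below (the HadoopVersionCheck class name is illustrative, not part of this issue's code; it only uses the standard VersionInfo utility and core Java reflection):

import org.apache.hadoop.util.VersionInfo;

public class HadoopVersionCheck {
  public static void main(String[] args) {
    // Reports the version of the Hadoop jars actually on the classpath.
    System.out.println("Hadoop on classpath: " + VersionInfo.getVersion());
    // JobContext is a class on the 1.x line and an interface on the 2.x line,
    // so this flag shows which line the runtime jars come from.
    System.out.println("JobContext is an interface: "
        + org.apache.hadoop.mapreduce.JobContext.class.isInterface());
  }
}

If that reports a version older than the one the job jar was compiled against, rebuilding against the cluster's Hadoop release should make the IncompatibleClassChangeError go away.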

> Exception in thread "main" java.lang.IncompatibleClassChangeError: Found class org.apache.hadoop.mapreduce.JobContext, but interface was expected
> -------------------------------------------------------------------------------------------------------------------------------------------------
>
>                 Key: MAPREDUCE-5668
>                 URL: https://issues.apache.org/jira/browse/MAPREDUCE-5668
>             Project: Hadoop Map/Reduce
>          Issue Type: Bug
>            Reporter: ranjini
>
> Hi, please help.
> I wrote this code, and at runtime I got the following exception.
> Exception in thread "main" java.lang.IncompatibleClassChangeError: Found class org.apache.hadoop.mapreduce.JobContext, but interface was expected
> 	at org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat.getSplits(CombineFileInputFormat.java:170)
> 	at org.apache.hadoop.mapred.JobClient.writeNewSplits(JobClient.java:885)
> 	at org.apache.hadoop.mapred.JobClient.submitJobInternal(JobClient.java:779)
> 	at org.apache.hadoop.mapreduce.Job.submit(Job.java:432)
> 	at org.apache.hadoop.mapreduce.Job.waitForCompletion(Job.java:447)
> 	at MultiFileWordCount.run(MultiFileWordCount.java:395)
> 	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:65)
> 	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:79)
> 	at MultiFileWordCount.main(MultiFileWordCount.java:401)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
> 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
> 	at java.lang.reflect.Method.invoke(Method.java:597)
> 	at org.apache.hadoop.util.RunJar.main(RunJar.java:156)
> hduser@localhost:~$ 
> I have attached the code.
> import java.io.DataInput;  
> import java.io.DataOutput;  
> import java.io.IOException;  
> import java.util.StringTokenizer;  
> import org.apache.hadoop.conf.Configured;  
> import org.apache.hadoop.fs.FSDataInputStream;  
> import org.apache.hadoop.fs.FileSystem;  
> import org.apache.hadoop.fs.Path;  
> import org.apache.hadoop.io.IntWritable;  
> import org.apache.hadoop.io.Text;  
> import org.apache.hadoop.io.WritableComparable;  
> import org.apache.hadoop.mapreduce.InputSplit;  
> import org.apache.hadoop.mapreduce.Job;  
> import org.apache.hadoop.mapreduce.Mapper;  
> import org.apache.hadoop.mapreduce.RecordReader;  
> import org.apache.hadoop.mapreduce.TaskAttemptContext;  
> import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;  
> import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;  
> import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;  
> import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;  
> import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;  
> import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;  
> import org.apache.hadoop.util.LineReader;  
> import org.apache.hadoop.util.Tool;  
> import org.apache.hadoop.util.ToolRunner;  
>  /**
>   * MultiFileWordCount is an example that demonstrates the usage of
>   * CombineFileInputFormat. This example counts the occurrences of
>   * words in the text files under the given input directory.
>   */
> public class MultiFileWordCount extends Configured implements Tool {  
>    /**  
>     * This record keeps <filename,offset> pairs.  
>     */ 
> public static class WordOffset implements WritableComparable {  
>    private long offset;  
>    private String fileName;  
>   
>    public void readFields(DataInput in) throws IOException {  
>       this.offset = in.readLong();  
>       this.fileName = Text.readString(in);  
>      }  
>      public void write(DataOutput out) throws IOException {  
>        out.writeLong(offset);  
>        Text.writeString(out, fileName);  
>      }  
>       public int compareTo(Object o) {  
>        WordOffset that = (WordOffset)o;  
>        int f = this.fileName.compareTo(that.fileName);  
>        if(f == 0) {  
>          return (int)Math.signum((double)(this.offset - that.offset));  
>        }  
>        return f;  
>      }  
>      @Override 
>      public boolean equals(Object obj) {
>        if (obj instanceof WordOffset)
>          return this.compareTo(obj) == 0;
>        return false;
>      }
>      @Override 
>      public int hashCode() {  
>      assert false : "hashCode not designed";  
>      return 42; //an arbitrary constant  
>      }  
>    }  
>    /**
>     * To use {@link CombineFileInputFormat}, one should extend it to return a
>     * (custom) {@link RecordReader}. CombineFileInputFormat uses
>     * {@link CombineFileSplit}s.
>     */
>    public static class MyInputFormat
>        extends CombineFileInputFormat<WordOffset, Text> {
>      @Override
>      public RecordReader<WordOffset, Text> createRecordReader(InputSplit split,
>          TaskAttemptContext context) throws IOException {
>        return new CombineFileRecordReader<WordOffset, Text>(
>            (CombineFileSplit)split, context, CombineFileLineRecordReader.class);
>      }
>    }
>   
>    /**
>     * RecordReader is responsible for extracting records from a chunk
>     * of the CombineFileSplit.
>     */
>    public static class CombineFileLineRecordReader
>        extends RecordReader<WordOffset, Text> {
>      private long startOffset; //offset of the chunk;  
>      private long end; //end of the chunk;  
>      private long pos; // current pos   
>      private FileSystem fs;  
>      private Path path;  
>      private WordOffset key;  
>      private Text value;  
>      private FSDataInputStream fileIn;  
>      private LineReader reader;  
>      public CombineFileLineRecordReader(CombineFileSplit split,  
>          TaskAttemptContext context, Integer index) throws IOException {  
>       
>        this.path = split.getPath(index);  
>        fs = this.path.getFileSystem(context.getConfiguration());  
>        this.startOffset = split.getOffset(index);  
>        this.end = startOffset + split.getLength(index);  
>        boolean skipFirstLine = false;  
>        //open the file  
>        fileIn = fs.open(path);  
>        if (startOffset != 0) {
>          skipFirstLine = true;
>          --startOffset;
>          fileIn.seek(startOffset);
>        }
>        reader = new LineReader(fileIn);
>        if (skipFirstLine) {  // skip first line and re-establish "startOffset".
>          startOffset += reader.readLine(new Text(), 0,
>              (int)Math.min((long)Integer.MAX_VALUE, end - startOffset));
>        }
>        this.pos = startOffset;  
>      }  
>      public void initialize(InputSplit split, TaskAttemptContext context)  
>         throws IOException, InterruptedException {  
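>        // Nothing to do here: all state is set up in the constructor above,
>        // which CombineFileRecordReader invokes via the (split, context, index) signature.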
>      }
>      public void close() throws IOException { }  
>      public float getProgress() throws IOException {
>        if (startOffset == end) {
>          return 0.0f;
>        } else {
>          return Math.min(1.0f, (pos - startOffset) / (float)(end - startOffset));
>        }
>      }
>      public boolean nextKeyValue() throws IOException {
>        if (key == null) {
>          key = new WordOffset();
>          key.fileName = path.getName();
>        }
>        key.offset = pos;
>        if (value == null) {
>          value = new Text();
>        }
>        int newSize = 0;
>        if (pos < end) {
>          newSize = reader.readLine(value);
>          pos += newSize;
>        }
>        if (newSize == 0) {
>          key = null;
>          value = null;
>          return false;
>        } else {
>          return true;
>        }
>      }
>      public WordOffset getCurrentKey()
>          throws IOException, InterruptedException {
>        return key;
>      }
>      public Text getCurrentValue() throws IOException, InterruptedException {
>        return value;
>      }
>    }  
>   
>    /**  
>     * This Mapper is similar to the one in {@link WordCount.MapClass}.  
>     */ 
>    public static class MapClass extends
>        Mapper<WordOffset, Text, Text, IntWritable> {
>      private final static IntWritable one = new IntWritable(1);  
>      private Text word = new Text();  
>      public void map(WordOffset key, Text value, Context context)  
>          throws IOException, InterruptedException {  
>        String line = value.toString();  
>        StringTokenizer itr = new StringTokenizer(line);  
>        while (itr.hasMoreTokens()) {  
>          word.set(itr.nextToken());  
>          context.write(word, one);  
>        }  
>      }  
>    }  
>    private void printUsage() {  
>      System.out.println("Usage : multifilewc <input_dir> <output>");
>    }  
>    public int run(String[] args) throws Exception {  
>      if(args.length < 2) {  
>        printUsage();  
>        return 2;  
>      }  
>      Job job = new Job(getConf());  
>      job.setJobName("MultiFileWordCount");  
>      job.setJarByClass(MultiFileWordCount.class);  
>     
>      //set the InputFormat of the job to our InputFormat  
>      job.setInputFormatClass(MyInputFormat.class);  
>        
>      // the keys are words (strings)  
>      job.setOutputKeyClass(Text.class);  
>      // the values are counts (ints)  
>      job.setOutputValueClass(IntWritable.class);  
>  
>      //use the defined mapper  
>      job.setMapperClass(MapClass.class);  
>      //use the WordCount Reducer  
>      job.setCombinerClass(IntSumReducer.class);  
>      job.setReducerClass(IntSumReducer.class);  
>    
>      FileInputFormat.addInputPaths(job, args[0]);  
>      FileOutputFormat.setOutputPath(job, new Path(args[1]));  
>      return job.waitForCompletion(true) ? 0 : 1;  
>    }  
>    
>    public static void main(String[] args) throws Exception {  
>      int ret = ToolRunner.run(new MultiFileWordCount(), args);  
>      System.exit(ret);  
>    }  
> }


