Posted to mapreduce-commits@hadoop.apache.org by to...@apache.org on 2011/07/25 23:42:05 UTC
svn commit: r1150926 - in /hadoop/common/trunk/mapreduce: CHANGES.txt
src/java/org/apache/hadoop/mapred/LineRecordReader.java
src/java/org/apache/hadoop/mapred/TextInputFormat.java
src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java
Author: todd
Date: Mon Jul 25 21:42:04 2011
New Revision: 1150926
URL: http://svn.apache.org/viewvc?rev=1150926&view=rev
Log:
MAPREDUCE-2602. Allow setting of end-of-record delimiter for TextInputFormat for the old API. Contributed by Ahmed Radwan.
Added:
hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java
Modified:
hadoop/common/trunk/mapreduce/CHANGES.txt
hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/LineRecordReader.java
hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/TextInputFormat.java
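
For context, a minimal sketch of how a job written against the old
(org.apache.hadoop.mapred) API can opt into the new delimiter; the driver
class name and argument paths are illustrative, not part of this commit:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.TextInputFormat;
    import org.apache.hadoop.mapred.lib.IdentityMapper;
    import org.apache.hadoop.mapred.lib.IdentityReducer;

    public class CustomDelimiterDriver {  // illustrative class name
      public static void main(String[] args) throws Exception {
        JobConf job = new JobConf(CustomDelimiterDriver.class);
        // Records now end at "\t\n" instead of the default line endings.
        job.set("textinputformat.record.delimiter", "\t\n");
        job.setInputFormat(TextInputFormat.class);
        job.setMapperClass(IdentityMapper.class);
        job.setReducerClass(IdentityReducer.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        JobClient.runJob(job);
      }
    }

When the property is left unset, getRecordReader() passes a null delimiter
through and the readers fall back to the default newline handling, so
existing jobs are unaffected.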
Modified: hadoop/common/trunk/mapreduce/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/CHANGES.txt?rev=1150926&r1=1150925&r2=1150926&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/CHANGES.txt (original)
+++ hadoop/common/trunk/mapreduce/CHANGES.txt Mon Jul 25 21:42:04 2011
@@ -203,6 +203,9 @@ Trunk (unreleased changes)
MAPREDUCE-2623. Update ClusterMapReduceTestCase to use
MiniDFSCluster.Builder (Harsh J Chouraria via eli)
+ MAPREDUCE-2602. Allow setting of end-of-record delimiter for
+ TextInputFormat for the old API. (Ahmed Radwan via todd)
+
OPTIMIZATIONS
MAPREDUCE-2026. Make JobTracker.getJobCounters() and
Modified: hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/LineRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/LineRecordReader.java?rev=1150926&r1=1150925&r2=1150926&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/LineRecordReader.java (original)
+++ hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/LineRecordReader.java Mon Jul 25 21:42:04 2011
@@ -77,10 +77,25 @@ public class LineRecordReader implements
public LineReader(InputStream in, Configuration conf) throws IOException {
super(in, conf);
}
+ LineReader(InputStream in, byte[] recordDelimiter) {
+ super(in, recordDelimiter);
+ }
+ LineReader(InputStream in, int bufferSize, byte[] recordDelimiter) {
+ super(in, bufferSize, recordDelimiter);
+ }
+ public LineReader(InputStream in, Configuration conf,
+ byte[] recordDelimiter) throws IOException {
+ super(in, conf, recordDelimiter);
+ }
}
public LineRecordReader(Configuration job,
FileSplit split) throws IOException {
+ this(job, split, null);
+ }
+
+ public LineRecordReader(Configuration job, FileSplit split,
+ byte[] recordDelimiter) throws IOException {
this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
start = split.getStart();
@@ -99,17 +114,17 @@ public class LineRecordReader implements
((SplittableCompressionCodec)codec).createInputStream(
fileIn, decompressor, start, end,
SplittableCompressionCodec.READ_MODE.BYBLOCK);
- in = new LineReader(cIn, job);
+ in = new LineReader(cIn, job, recordDelimiter);
start = cIn.getAdjustedStart();
end = cIn.getAdjustedEnd();
filePosition = cIn; // take pos from compressed stream
} else {
- in = new LineReader(codec.createInputStream(fileIn, decompressor), job);
+ in = new LineReader(codec.createInputStream(fileIn, decompressor), job, recordDelimiter);
filePosition = fileIn;
}
} else {
fileIn.seek(start);
- in = new LineReader(fileIn, job);
+ in = new LineReader(fileIn, job, recordDelimiter);
filePosition = fileIn;
}
// If this is not the first split, we always throw away first record
@@ -120,29 +135,40 @@ public class LineRecordReader implements
}
this.pos = start;
}
-
+
public LineRecordReader(InputStream in, long offset, long endOffset,
int maxLineLength) {
+ this(in, offset, endOffset, maxLineLength, null);
+ }
+
+ public LineRecordReader(InputStream in, long offset, long endOffset,
+ int maxLineLength, byte[] recordDelimiter) {
this.maxLineLength = maxLineLength;
- this.in = new LineReader(in);
+ this.in = new LineReader(in, recordDelimiter);
this.start = offset;
this.pos = offset;
this.end = endOffset;
filePosition = null;
}
+ public LineRecordReader(InputStream in, long offset, long endOffset,
+ Configuration job)
+ throws IOException{
+ this(in, offset, endOffset, job, null);
+ }
+
public LineRecordReader(InputStream in, long offset, long endOffset,
- Configuration job)
+ Configuration job, byte[] recordDelimiter)
throws IOException{
this.maxLineLength = job.getInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
- this.in = new LineReader(in, job);
+ this.in = new LineReader(in, job, recordDelimiter);
this.start = offset;
this.pos = offset;
this.end = endOffset;
filePosition = null;
}
-
+
public LongWritable createKey() {
return new LongWritable();
}
@@ -171,7 +197,6 @@ public class LineRecordReader implements
return retVal;
}
-
/** Read a line. */
public synchronized boolean next(LongWritable key, Text value)
throws IOException {
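
The new public constructors above also make it possible to drive a
LineRecordReader directly with a byte[] delimiter. A minimal sketch, using
the same input bytes as the test added below (the class name is
illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.LineRecordReader;

    public class LineRecordReaderSketch {
      public static void main(String[] args) throws IOException {
        byte[] data = "abc\ndef\t\nghi\njkl".getBytes();
        // Uses the new (in, offset, endOffset, maxLineLength, delimiter)
        // constructor added in this commit.
        LineRecordReader reader = new LineRecordReader(
            new ByteArrayInputStream(data), 0, data.length,
            Integer.MAX_VALUE, "\t\n".getBytes());
        LongWritable key = reader.createKey();
        Text value = reader.createValue();
        while (reader.next(key, value)) {
          // Prints 0 / "abc\ndef", then 9 / "ghi\njkl".
          System.out.println(key + "\t" + value);
        }
        reader.close();
      }
    }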
Modified: hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/TextInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/TextInputFormat.java?rev=1150926&r1=1150925&r2=1150926&view=diff
==============================================================================
--- hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/TextInputFormat.java (original)
+++ hadoop/common/trunk/mapreduce/src/java/org/apache/hadoop/mapred/TextInputFormat.java Mon Jul 25 21:42:04 2011
@@ -59,6 +59,10 @@ public class TextInputFormat extends Fil
throws IOException {
reporter.setStatus(genericSplit.toString());
- return new LineRecordReader(job, (FileSplit) genericSplit);
+ String delimiter = job.get("textinputformat.record.delimiter");
+ byte[] recordDelimiterBytes = null;
+ if (null != delimiter) recordDelimiterBytes = delimiter.getBytes();
+ return new LineRecordReader(job, (FileSplit) genericSplit,
+ recordDelimiterBytes);
}
}
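
Note that the delimiter string is converted with String.getBytes(), i.e.
the platform default charset, and when the property is unset the null
simply falls through to the default newline handling. The resulting bytes
reach org.apache.hadoop.util.LineReader via the constructors added above;
a quick sketch of that low-level splitting (input and delimiter are
illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.util.LineReader;

    public class LineReaderSketch {
      public static void main(String[] args) throws IOException {
        // Same default-charset conversion as in getRecordReader() above.
        LineReader in = new LineReader(
            new ByteArrayInputStream("one||two||three".getBytes()),
            "||".getBytes());
        Text line = new Text();
        while (in.readLine(line) > 0) {
          System.out.println(line);  // "one", "two", "three"
        }
        in.close();
      }
    }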
Added: hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java?rev=1150926&view=auto
==============================================================================
--- hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java (added)
+++ hadoop/common/trunk/mapreduce/src/test/mapred/org/apache/hadoop/mapred/TestLineRecordReader.java Mon Jul 25 21:42:04 2011
@@ -0,0 +1,139 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.Reader;
+import java.io.Writer;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.Mapper;
+import org.apache.hadoop.mapred.Reducer;
+import org.apache.hadoop.mapred.lib.IdentityMapper;
+import org.apache.hadoop.mapred.lib.IdentityReducer;
+import org.apache.tools.ant.util.FileUtils;
+import org.junit.Test;
+
+public class TestLineRecordReader extends TestCase {
+
+ private static Path workDir = new Path(new Path(System.getProperty(
+ "test.build.data", "."), "data"), "TestTextInputFormat");
+ private static Path inputDir = new Path(workDir, "input");
+ private static Path outputDir = new Path(workDir, "output");
+
+ /**
+ * Writes the input test file
+ *
+ * @param conf
+ * @throws IOException
+ */
+ public void createInputFile(Configuration conf) throws IOException {
+ FileSystem localFs = FileSystem.getLocal(conf);
+ Path file = new Path(inputDir, "test.txt");
+ Writer writer = new OutputStreamWriter(localFs.create(file));
+ writer.write("abc\ndef\t\nghi\njkl");
+ writer.close();
+ }
+
+ /**
+ * Reads the output file into a string
+ *
+ * @param conf
+ * @return
+ * @throws IOException
+ */
+ public String readOutputFile(Configuration conf) throws IOException {
+ FileSystem localFs = FileSystem.getLocal(conf);
+ Path file = new Path(outputDir, "part-00000");
+ Reader reader = new InputStreamReader(localFs.open(file));
+ String r = FileUtils.readFully(reader);
+ reader.close();
+ return r;
+ }
+
+ /**
+ * Creates and runs an MR job
+ *
+ * @param conf
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws ClassNotFoundException
+ */
+ public void createAndRunJob(Configuration conf) throws IOException,
+ InterruptedException, ClassNotFoundException {
+ JobConf job = new JobConf(conf);
+ job.setJarByClass(TestLineRecordReader.class);
+ job.setMapperClass(IdentityMapper.class);
+ job.setReducerClass(IdentityReducer.class);
+ FileInputFormat.addInputPath(job, inputDir);
+ FileOutputFormat.setOutputPath(job, outputDir);
+ JobClient.runJob(job);
+ }
+
+ /**
+ * Test the case when a custom record delimiter is specified using the
+ * textinputformat.record.delimiter configuration property
+ *
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws ClassNotFoundException
+ */
+ @Test
+ public void testCustomRecordDelimiters() throws IOException,
+ InterruptedException, ClassNotFoundException {
+ Configuration conf = new Configuration();
+ conf.set("textinputformat.record.delimiter", "\t\n");
+ FileSystem localFs = FileSystem.getLocal(conf);
+ // cleanup
+ localFs.delete(workDir, true);
+ // creating input test file
+ createInputFile(conf);
+ createAndRunJob(conf);
+ String expected = "0\tabc\ndef\n9\tghi\njkl\n";
+ this.assertEquals(expected, readOutputFile(conf));
+ }
+
+ /**
+ * Test the default behavior when the textinputformat.record.delimiter
+ * configuration property is not specified
+ *
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws ClassNotFoundException
+ */
+ @Test
+ public void testDefaultRecordDelimiters() throws IOException,
+ InterruptedException, ClassNotFoundException {
+ Configuration conf = new Configuration();
+ FileSystem localFs = FileSystem.getLocal(conf);
+ // cleanup
+ localFs.delete(workDir, true);
+ // creating input test file
+ createInputFile(conf);
+ createAndRunJob(conf);
+ String expected = "0\tabc\n4\tdef\t\n9\tghi\n13\tjkl\n";
+ this.assertEquals(expected, readOutputFile(conf));
+ }
+
+}
\ No newline at end of file
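
A note on the expected strings in the two tests: with the "\t\n" delimiter,
the input "abc\ndef\t\nghi\njkl" splits into "abc\ndef" at byte offset 0 and
"ghi\njkl" at offset 9 (7 record bytes plus the 2-byte delimiter), and the
identity job emits each record as key<TAB>value, hence
"0\tabc\ndef\n9\tghi\njkl\n". With the delimiter unset, the same input
splits at each "\n" into records at offsets 0, 4, 9 and 13, the second
record keeping its trailing tab.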