Posted to common-commits@hadoop.apache.org by vv...@apache.org on 2016/02/23 10:17:47 UTC

[4/5] hadoop git commit: MAPREDUCE-6635. Unsafe long to int conversion in UncompressedSplitLineReader and IndexOutOfBoundsException. Contributed by Junping Du.

MAPREDUCE-6635. Unsafe long to int conversion in UncompressedSplitLineReader and IndexOutOfBoundsException. Contributed by Junping Du.

(cherry picked from commit c6f2d761d5430eac6b9f07f137a7028de4e0660c)
(cherry picked from commit f1999fe2754cbf11b138fb048c7486cab9b02c97)

 Conflicts:
	hadoop-mapreduce-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d32100d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d32100d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d32100d7

Branch: refs/heads/branch-2.6
Commit: d32100d7fcdbdf2a0584a647bf4d7249909cf402
Parents: 3fea7f0
Author: Varun Vasudev <vv...@apache.org>
Authored: Tue Feb 23 13:05:18 2016 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Tue Feb 23 14:08:14 2016 +0530

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 ++
 .../lib/input/UncompressedSplitLineReader.java  |  7 ++-
 .../hadoop/mapred/TestLineRecordReader.java     | 53 ++++++++++++++++++++
 3 files changed, 61 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d32100d7/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 97a9f7b..6ab854c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -18,6 +18,9 @@ Release 2.6.5 - UNRELEASED
     MAPREDUCE-6191. Improve clearing stale state of Java serialization
                     testcase.  (Sam Liu via Eric Yang)
 
+    MAPREDUCE-6635. Unsafe long to int conversion in UncompressedSplitLineReader
+    and IndexOutOfBoundsException. (Junping Du via vvasudev)
+
 Release 2.6.4 - 2016-02-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d32100d7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
index 6d495ef..bda0218 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/UncompressedSplitLineReader.java
@@ -53,8 +53,11 @@ public class UncompressedSplitLineReader extends SplitLineReader {
       throws IOException {
     int maxBytesToRead = buffer.length;
     if (totalBytesRead < splitLength) {
-      maxBytesToRead = Math.min(maxBytesToRead,
-                                (int)(splitLength - totalBytesRead));
+      long leftBytesForSplit = splitLength - totalBytesRead;
+      // check if leftBytesForSplit exceeds Integer.MAX_VALUE
+      if (leftBytesForSplit <= Integer.MAX_VALUE) {
+        maxBytesToRead = Math.min(maxBytesToRead, (int)leftBytesForSplit);
+      }
     }
     int bytesRead = in.read(buffer, 0, maxBytesToRead);
 

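For context on the hunk above: in Java, narrowing a long greater than Integer.MAX_VALUE to int wraps around, so the old (int)(splitLength - totalBytesRead) could yield a negative value, Math.min would then pick that negative value as maxBytesToRead, and InputStream.read(byte[], int, int) throws IndexOutOfBoundsException for a negative length. The following is a minimal standalone sketch (not part of this commit; the class name is hypothetical) illustrating both the wraparound and the guarded cast the patch introduces:

public class CastOverflowSketch {
  public static void main(String[] args) {
    long splitLength = (long) Integer.MAX_VALUE + 1;  // 2147483648, as in the new test
    long totalBytesRead = 0;
    int bufferLength = 64 * 1024;

    // Old behavior: the cast wraps to Integer.MIN_VALUE before Math.min runs.
    int truncated = (int) (splitLength - totalBytesRead);       // -2147483648
    int oldMaxBytesToRead = Math.min(bufferLength, truncated);  // -2147483648
    System.out.println("old maxBytesToRead = " + oldMaxBytesToRead);
    // A negative length passed to InputStream.read(byte[], int, int)
    // triggers the IndexOutOfBoundsException from the JIRA title.

    // Patched behavior: compare in long arithmetic, cast only when safe.
    long leftBytesForSplit = splitLength - totalBytesRead;
    int newMaxBytesToRead = bufferLength;
    if (leftBytesForSplit <= Integer.MAX_VALUE) {
      newMaxBytesToRead = Math.min(newMaxBytesToRead, (int) leftBytesForSplit);
    }
    System.out.println("new maxBytesToRead = " + newMaxBytesToRead);  // 65536
  }
}

When the remaining split bytes exceed Integer.MAX_VALUE, the patched code simply leaves maxBytesToRead at the buffer length, which is always a safe int.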
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d32100d7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
index 3e6002b..cb733bd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
@@ -105,6 +105,43 @@ public class TestLineRecordReader {
         numRecordsNoSplits, numRecordsFirstSplit + numRecordsRemainingSplits);
   }
 
+  private void testLargeSplitRecordForFile(Configuration conf,
+      long firstSplitLength, long testFileSize, Path testFilePath)
+      throws IOException {
+    conf.setInt(org.apache.hadoop.mapreduce.lib.input.
+        LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
+    assertTrue("unexpected firstSplitLength:" + firstSplitLength,
+        testFileSize < firstSplitLength);
+    String delimiter = conf.get("textinputformat.record.delimiter");
+    byte[] recordDelimiterBytes = null;
+    if (null != delimiter) {
+      recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8);
+    }
+    // read the data without splitting to count the records
+    FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
+        (String[])null);
+    LineRecordReader reader = new LineRecordReader(conf, split,
+        recordDelimiterBytes);
+    LongWritable key = new LongWritable();
+    Text value = new Text();
+    int numRecordsNoSplits = 0;
+    while (reader.next(key, value)) {
+      ++numRecordsNoSplits;
+    }
+    reader.close();
+
+    // count the records in the first split
+    split = new FileSplit(testFilePath, 0, firstSplitLength, (String[])null);
+    reader = new LineRecordReader(conf, split, recordDelimiterBytes);
+    int numRecordsFirstSplit = 0;
+    while (reader.next(key, value)) {
+      ++numRecordsFirstSplit;
+    }
+    reader.close();
+    assertEquals("Unexpected number of records in split",
+        numRecordsNoSplits, numRecordsFirstSplit);
+  }
+
   @Test
   public void testBzip2SplitEndsAtCR() throws IOException {
     // the test data contains a carriage-return at the end of the first
@@ -267,6 +304,22 @@ public class TestLineRecordReader {
   }
 
   @Test
+  public void testUncompressedInputWithLargeSplitSize() throws Exception {
+    Configuration conf = new Configuration();
+    // single char delimiter
+    String inputData = "abcde +fghij+ klmno+pqrst+uvwxyz";
+    Path inputFile = createInputFile(conf, inputData);
+    conf.set("textinputformat.record.delimiter", "+");
+    // split size over max value of integer
+    long longSplitSize = (long)Integer.MAX_VALUE + 1;
+    for (int bufferSize = 1; bufferSize <= inputData.length(); bufferSize++) {
+      conf.setInt("io.file.buffer.size", bufferSize);
+      testLargeSplitRecordForFile(conf, longSplitSize, inputData.length(),
+          inputFile);
+    }
+  }
+
+  @Test
   public void testUncompressedInput() throws Exception {
     Configuration conf = new Configuration();
     // single char delimiter, best case