Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2015/05/25 12:33:06 UTC

hadoop git commit: HADOOP-12029. Remove chunkSize from ECSchema as it's not required for coders (Contributed by Vinayakumar B)

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 f803fd325 -> f56e19286


HADOOP-12029. Remove chunkSize from ECSchema as it's not required for coders (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f56e1928
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f56e1928
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f56e1928

Branch: refs/heads/HDFS-7285
Commit: f56e1928672c60f9c38c8a843b58cafddb13a99d
Parents: f803fd3
Author: Vinayakumar B <vi...@apache.org>
Authored: Mon May 25 16:02:37 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Mon May 25 16:02:37 2015 +0530

----------------------------------------------------------------------
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt      |  5 ++-
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 40 +-------------------
 .../hadoop/io/erasurecode/TestECSchema.java     |  3 --
 3 files changed, 6 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f56e1928/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 531b8d5..c9b80d3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -55,4 +55,7 @@
     HADOOP-11938. Enhance ByteBuffer version encode/decode API of raw erasure 
     coder. (Kai Zheng via Zhe Zhang)
 
-    HADOOP-12013. Generate fixed data to perform erasure coder test. (Kai Zheng)
\ No newline at end of file
+    HADOOP-12013. Generate fixed data to perform erasure coder test. (Kai Zheng)
+
+    HADOOP-12029. Remove chunkSize from ECSchema as its not required for coders
+    (vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f56e1928/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index f058ea7..fdc569e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -28,8 +28,6 @@ public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = "k";
   public static final String NUM_PARITY_UNITS_KEY = "m";
   public static final String CODEC_NAME_KEY = "codec";
-  public static final String CHUNK_SIZE_KEY = "chunkSize";
-  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   /**
    * A friendly and understandable name that can mean what's it, also serves as
@@ -52,11 +50,6 @@ public final class ECSchema {
    */
   private final int numParityUnits;
 
-  /**
-   * Unit data size for each chunk in a coding
-   */
-  private final int chunkSize;
-
   /*
    * An erasure code can have its own specific advanced parameters, subject to
    * itself to interpret these key-value settings.
@@ -92,17 +85,9 @@ public final class ECSchema {
     this.numDataUnits = tmpNumDataUnits;
     this.numParityUnits = tmpNumParityUnits;
 
-    int tmpChunkSize = extractIntOption(CHUNK_SIZE_KEY, allOptions);
-    if (tmpChunkSize > 0) {
-      this.chunkSize = tmpChunkSize;
-    } else {
-      this.chunkSize = DEFAULT_CHUNK_SIZE;
-    }
-
     allOptions.remove(CODEC_NAME_KEY);
     allOptions.remove(NUM_DATA_UNITS_KEY);
     allOptions.remove(NUM_PARITY_UNITS_KEY);
-    allOptions.remove(CHUNK_SIZE_KEY);
     // After some cleanup
     this.extraOptions = Collections.unmodifiableMap(allOptions);
   }
@@ -144,14 +129,6 @@ public final class ECSchema {
       extraOptions = new HashMap<>();
     }
 
-    int tmpChunkSize = extractIntOption(CHUNK_SIZE_KEY, extraOptions);
-    if (tmpChunkSize > 0) {
-      this.chunkSize = tmpChunkSize;
-    } else {
-      this.chunkSize = DEFAULT_CHUNK_SIZE;
-    }
-
-    extraOptions.remove(CHUNK_SIZE_KEY);
     // After some cleanup
     this.extraOptions = Collections.unmodifiableMap(extraOptions);
   }
@@ -217,14 +194,6 @@ public final class ECSchema {
   }
 
   /**
-   * Get chunk buffer size for the erasure encoding/decoding.
-   * @return chunk buffer size
-   */
-  public int getChunkSize() {
-    return chunkSize;
-  }
-
-  /**
    * Make a meaningful string representation for log output.
    * @return string representation
    */
@@ -235,9 +204,8 @@ public final class ECSchema {
     sb.append("Name=" + schemaName + ", ");
     sb.append("Codec=" + codecName + ", ");
     sb.append(NUM_DATA_UNITS_KEY + "=" + numDataUnits + ", ");
-    sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits + ", ");
-    sb.append(CHUNK_SIZE_KEY + "=" + chunkSize +
-        (extraOptions.isEmpty() ? "" : ", "));
+    sb.append(NUM_PARITY_UNITS_KEY + "=" + numParityUnits);
+    sb.append((extraOptions.isEmpty() ? "" : ", "));
 
     int i = 0;
     for (String opt : extraOptions.keySet()) {
@@ -267,9 +235,6 @@ public final class ECSchema {
     if (numParityUnits != ecSchema.numParityUnits) {
       return false;
     }
-    if (chunkSize != ecSchema.chunkSize) {
-      return false;
-    }
     if (!schemaName.equals(ecSchema.schemaName)) {
       return false;
     }
@@ -286,7 +251,6 @@ public final class ECSchema {
     result = 31 * result + extraOptions.hashCode();
     result = 31 * result + numDataUnits;
     result = 31 * result + numParityUnits;
-    result = 31 * result + chunkSize;
 
     return result;
   }
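
For context, a minimal usage sketch (not part of this patch): after this change the constructor strips only the codec, k and m keys, so a "chunkSize" entry is no longer parsed into a dedicated field and simply survives as an opaque extra option. This assumes the two-argument constructor exercised in TestECSchema below:

  // Sketch only: constructing an ECSchema after HADOOP-12029. "chunkSize"
  // is no longer a recognized key; it is kept verbatim in extraOptions for
  // a codec to interpret (or ignore).
  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.io.erasurecode.ECSchema;

  public class ECSchemaSketch {
    public static void main(String[] args) {
      Map<String, String> options = new HashMap<>();
      options.put(ECSchema.NUM_DATA_UNITS_KEY, "6");    // k = 6 data units
      options.put(ECSchema.NUM_PARITY_UNITS_KEY, "3");  // m = 3 parity units
      options.put(ECSchema.CODEC_NAME_KEY, "rs");
      options.put("chunkSize", "65536");                // now just an extra option

      ECSchema schema = new ECSchema("RS-6-3", options);
      System.out.println(schema);  // toString() no longer prints a chunkSize field
      System.out.println(schema.getExtraOptions().get("chunkSize"));  // "65536"
    }
  }

The design point matches the commit message: buffer sizing is a concern of the coders, not a property of the schema, which is why equals(), hashCode() and toString() drop the field as well.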

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f56e1928/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
index 15e672f..c362b96 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestECSchema.java
@@ -29,7 +29,6 @@ public class TestECSchema {
     String schemaName = "goodSchema";
     int numDataUnits = 6;
     int numParityUnits = 3;
-    int chunkSize = 64 * 1024 * 1024;
     String codec = "rs";
     String extraOption = "extraOption";
     String extraOptionValue = "extraOptionValue";
@@ -38,7 +37,6 @@ public class TestECSchema {
     options.put(ECSchema.NUM_DATA_UNITS_KEY, String.valueOf(numDataUnits));
     options.put(ECSchema.NUM_PARITY_UNITS_KEY, String.valueOf(numParityUnits));
     options.put(ECSchema.CODEC_NAME_KEY, codec);
-    options.put(ECSchema.CHUNK_SIZE_KEY, String.valueOf(chunkSize));
     options.put(extraOption, extraOptionValue);
 
     ECSchema schema = new ECSchema(schemaName, options);
@@ -47,7 +45,6 @@ public class TestECSchema {
     assertEquals(schemaName, schema.getSchemaName());
     assertEquals(numDataUnits, schema.getNumDataUnits());
     assertEquals(numParityUnits, schema.getNumParityUnits());
-    assertEquals(chunkSize, schema.getChunkSize());
     assertEquals(codec, schema.getCodecName());
     assertEquals(extraOptionValue, schema.getExtraOptions().get(extraOption));
   }
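
One migration sketch, again hedged rather than taken from this patch: a caller that previously relied on schema.getChunkSize() can reproduce the old behavior locally, since both the removed key name ("chunkSize") and the removed default (256 * 1024) were plain constants:

  import org.apache.hadoop.io.erasurecode.ECSchema;

  public final class ChunkSizeMigration {
    // Former ECSchema.DEFAULT_CHUNK_SIZE, now owned by the caller.
    private static final int DEFAULT_CHUNK_SIZE = 256 * 1024;

    /** Reads a chunk size from a schema's extra options, with a local default. */
    static int chunkSizeOf(ECSchema schema) {
      String opt = schema.getExtraOptions().get("chunkSize");
      return (opt != null) ? Integer.parseInt(opt) : DEFAULT_CHUNK_SIZE;
    }
  }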