Posted to commits@ozone.apache.org by um...@apache.org on 2021/10/22 18:07:46 UTC

[ozone] branch HDDS-3816-ec updated: HDDS-5364: EC: Adopt EC related utility from Hadoop source repository (#2733)

This is an automated email from the ASF dual-hosted git repository.

umamahesh pushed a commit to branch HDDS-3816-ec
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/HDDS-3816-ec by this push:
     new 914ce67  HDDS-5364: EC: Adopt EC related utility from Hadoop source repository (#2733)
914ce67 is described below

commit 914ce67af30ea174a49c7d4904714095d7061df0
Author: Uma Maheswara Rao G <um...@apache.org>
AuthorDate: Fri Oct 22 11:07:26 2021 -0700

    HDDS-5364: EC: Adopt EC related utility from Hadoop source repository (#2733)
    
    Co-authored-by: Elek Márton <el...@apache.org>
    Co-authored-by: Uma Maheswara Rao G <um...@cloudera.com>
---
 .../client => hadoop-hdds/erasurecode}/pom.xml     |  36 +-
 .../apache/ozone/erasurecode/BufferAllocator.java  |  91 ++++
 .../apache/ozone/erasurecode/CodecRegistry.java    | 163 ++++++
 .../java/org/apache/ozone/erasurecode/ECChunk.java | 113 +++++
 .../org/apache/ozone/erasurecode/package-info.java |  31 ++
 .../rawcoder/AbstractNativeRawDecoder.java         | 109 ++++
 .../rawcoder/AbstractNativeRawEncoder.java         | 107 ++++
 .../rawcoder/ByteArrayDecodingState.java           | 132 +++++
 .../rawcoder/ByteArrayEncodingState.java           | 105 ++++
 .../rawcoder/ByteBufferDecodingState.java          | 143 ++++++
 .../rawcoder/ByteBufferEncodingState.java          | 109 ++++
 .../ozone/erasurecode/rawcoder/CoderUtil.java      | 174 +++++++
 .../ozone/erasurecode/rawcoder/DecodingState.java  |  52 ++
 .../erasurecode/rawcoder/DummyRawDecoder.java      |  45 ++
 .../erasurecode/rawcoder/DummyRawEncoder.java      |  45 ++
 .../rawcoder/DummyRawErasureCoderFactory.java      |  52 ++
 .../ozone/erasurecode/rawcoder/EncodingState.java  |  47 ++
 .../ozone/erasurecode/rawcoder/RSRawDecoder.java   | 177 +++++++
 .../ozone/erasurecode/rawcoder/RSRawEncoder.java   |  77 +++
 .../rawcoder/RSRawErasureCoderFactory.java         |  52 ++
 .../rawcoder/RawErasureCoderFactory.java           |  56 ++
 .../erasurecode/rawcoder/RawErasureDecoder.java    | 217 ++++++++
 .../erasurecode/rawcoder/RawErasureEncoder.java    | 193 +++++++
 .../ozone/erasurecode/rawcoder/XORRawDecoder.java  |  88 ++++
 .../ozone/erasurecode/rawcoder/XORRawEncoder.java  |  86 ++++
 .../rawcoder/XORRawErasureCoderFactory.java        |  53 ++
 .../ozone/erasurecode/rawcoder/package-info.java   |  38 ++
 .../ozone/erasurecode/rawcoder/util/DumpUtil.java  |  99 ++++
 .../ozone/erasurecode/rawcoder/util/GF256.java     | 333 ++++++++++++
 .../erasurecode/rawcoder/util/GaloisField.java     | 564 +++++++++++++++++++++
 .../ozone/erasurecode/rawcoder/util/RSUtil.java    | 187 +++++++
 .../erasurecode/rawcoder/util/package-info.java    |  27 +
 ...one.erasurecode.rawcoder.RawErasureCoderFactory |  15 +
 .../org/apache/ozone/erasurecode/DumpUtil.java     |  96 ++++
 .../ozone/erasurecode/TestCodecRegistry.java       | 114 +++++
 .../apache/ozone/erasurecode/TestCoderBase.java    | 524 +++++++++++++++++++
 .../org/apache/ozone/erasurecode/package-info.java |  23 +
 .../rawcoder/RawErasureCoderBenchmark.java         | 412 +++++++++++++++
 .../erasurecode/rawcoder/TestDummyRawCoder.java    |  96 ++++
 .../ozone/erasurecode/rawcoder/TestRSRawCoder.java |  35 ++
 .../erasurecode/rawcoder/TestRSRawCoderBase.java   | 123 +++++
 .../erasurecode/rawcoder/TestRawCoderBase.java     | 353 +++++++++++++
 .../rawcoder/TestRawErasureCoderBenchmark.java     |  45 ++
 .../erasurecode/rawcoder/TestXORRawCoder.java      |  30 ++
 .../erasurecode/rawcoder/TestXORRawCoderBase.java  |  65 +++
 .../ozone/erasurecode/rawcoder/package-info.java   |  23 +
 hadoop-hdds/pom.xml                                |   1 +
 hadoop-ozone/client/pom.xml                        |   8 +-
 hadoop-ozone/dist/src/main/license/jar-report.txt  |   1 +
 ....io.erasurecode.rawcoder.RawErasureCoderFactory |  18 +
 hadoop-ozone/pom.xml                               |   5 +
 51 files changed, 5761 insertions(+), 27 deletions(-)

diff --git a/hadoop-ozone/client/pom.xml b/hadoop-hdds/erasurecode/pom.xml
similarity index 64%
copy from hadoop-ozone/client/pom.xml
copy to hadoop-hdds/erasurecode/pom.xml
index 56ebe56..40149fe 100644
--- a/hadoop-ozone/client/pom.xml
+++ b/hadoop-hdds/erasurecode/pom.xml
@@ -19,45 +19,35 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.ozone</groupId>
-    <artifactId>ozone</artifactId>
+    <artifactId>hdds</artifactId>
     <version>1.2.0-SNAPSHOT</version>
   </parent>
-  <artifactId>ozone-client</artifactId>
+  <artifactId>hdds-erasurecode</artifactId>
   <version>1.2.0-SNAPSHOT</version>
-  <description>Apache Ozone Client</description>
-  <name>Apache Ozone Client</name>
+  <description>Apache Ozone Distributed Data Store Erasurecode utils
+  </description>
+  <name>Apache Ozone HDDS Erasurecode</name>
   <packaging>jar</packaging>
 
   <dependencies>
+
     <dependency>
       <groupId>org.apache.ozone</groupId>
-      <artifactId>hdds-test-utils</artifactId>
-      <scope>test</scope>
+      <artifactId>hdds-common</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.ozone</groupId>
-      <artifactId>ozone-common</artifactId>
+      <artifactId>hdds-test-utils</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs-annotations</artifactId>
+      <scope>compile</scope>
     </dependency>
-      <dependency>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs-client</artifactId>
-      </dependency>
   </dependencies>
 
   <build>
-    <plugins>
-      <plugin>
-        <groupId>com.github.spotbugs</groupId>
-        <artifactId>spotbugs-maven-plugin</artifactId>
-        <configuration>
-          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
-        </configuration>
-      </plugin>
-    </plugins>
+
   </build>
 </project>
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/BufferAllocator.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/BufferAllocator.java
new file mode 100644
index 0000000..ed2c94e
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/BufferAllocator.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode;
+
+
+import java.nio.ByteBuffer;
+
+/**
+ * An abstract buffer allocator used for test.
+ */
+public abstract class BufferAllocator {
+  private boolean usingDirect = false;
+
+  public BufferAllocator(boolean usingDirect) {
+    this.usingDirect = usingDirect;
+  }
+
+  protected boolean isUsingDirect() {
+    return usingDirect;
+  }
+
+  /**
+   * Allocate and return a ByteBuffer of specified length.
+   * @param bufferLen length of the buffer to allocate
+   * @return the allocated ByteBuffer
+   */
+  public abstract ByteBuffer allocate(int bufferLen);
+
+  /**
+   * A simple buffer allocator that just uses ByteBuffer's
+   * allocate/allocateDirect API.
+   */
+  public static class SimpleBufferAllocator extends BufferAllocator {
+
+    public SimpleBufferAllocator(boolean usingDirect) {
+      super(usingDirect);
+    }
+
+    @Override
+    public ByteBuffer allocate(int bufferLen) {
+      return isUsingDirect() ? ByteBuffer.allocateDirect(bufferLen) :
+          ByteBuffer.allocate(bufferLen);
+    }
+  }
+
+  /**
+   * A buffer allocator that slices buffers out of an existing large buffer;
+   * when no space remains it degrades to SimpleBufferAllocator behavior.
+   * So please ensure the backing buffer has enough space.
+   */
+  public static class SlicedBufferAllocator extends BufferAllocator {
+    private ByteBuffer overallBuffer;
+
+    public SlicedBufferAllocator(boolean usingDirect, int totalBufferLen) {
+      super(usingDirect);
+      overallBuffer = isUsingDirect() ?
+          ByteBuffer.allocateDirect(totalBufferLen) :
+          ByteBuffer.allocate(totalBufferLen);
+    }
+
+    @Override
+    public ByteBuffer allocate(int bufferLen) {
+      if (bufferLen > overallBuffer.capacity() - overallBuffer.position()) {
+        // If no available space for the requested length, then allocate new
+        return isUsingDirect() ? ByteBuffer.allocateDirect(bufferLen) :
+            ByteBuffer.allocate(bufferLen);
+      }
+
+      overallBuffer.limit(overallBuffer.position() + bufferLen);
+      ByteBuffer result = overallBuffer.slice();
+      overallBuffer.position(overallBuffer.position() + bufferLen);
+      return result;
+    }
+  }
+
+}
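
For illustration, here is a minimal sketch of how the two allocators above behave, assuming the classes compile as shown in this diff (the example class and its name are hypothetical):

    import java.nio.ByteBuffer;
    import org.apache.ozone.erasurecode.BufferAllocator.SlicedBufferAllocator;

    public class AllocatorExample {
      public static void main(String[] args) {
        // One backing direct buffer of 3 x 1024 bytes; each allocate() call
        // slices a 1024-byte window out of it until the space is exhausted.
        SlicedBufferAllocator allocator =
            new SlicedBufferAllocator(true, 3 * 1024);
        ByteBuffer first = allocator.allocate(1024);
        ByteBuffer second = allocator.allocate(1024);
        // A request beyond the remaining space degrades to a fresh
        // ByteBuffer.allocateDirect, as documented above.
        ByteBuffer overflow = allocator.allocate(4096);
        System.out.println(first.isDirect() && second.isDirect()
            && overflow.isDirect()); // prints: true
      }
    }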
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java
new file mode 100644
index 0000000..858c558
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/CodecRegistry.java
@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.ServiceLoader;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * This class registers all coder implementations.
+ *
+ * {@link CodecRegistry} maps codec names to coder factories. All coder
+ * factories are dynamically identified and loaded using ServiceLoader.
+ */
+@InterfaceAudience.Private
+public final class CodecRegistry {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CodecRegistry.class);
+
+  private static CodecRegistry instance = new CodecRegistry();
+  private Map<String, List<RawErasureCoderFactory>> coderMap;
+  private Map<String, String[]> coderNameMap;
+
+  private CodecRegistry() {
+    coderMap = new HashMap<>();
+    coderNameMap = new HashMap<>();
+    final ServiceLoader<RawErasureCoderFactory> coderFactories =
+        ServiceLoader.load(RawErasureCoderFactory.class);
+    updateCoders(coderFactories);
+  }
+
+  public static CodecRegistry getInstance() {
+    return instance;
+  }
+
+  /**
+   * Update coderMap and coderNameMap with an iterable of coder factories.
+   * @param coderFactories the coder factories to register
+   */
+  @VisibleForTesting
+  void updateCoders(Iterable<RawErasureCoderFactory> coderFactories) {
+    for (RawErasureCoderFactory coderFactory : coderFactories) {
+      String codecName = coderFactory.getCodecName();
+      List<RawErasureCoderFactory> coders = coderMap.get(codecName);
+      if (coders == null) {
+        coders = new ArrayList<>();
+        coders.add(coderFactory);
+        coderMap.put(codecName, coders);
+        LOG.debug("Codec registered: codec = {}, coder = {}",
+            coderFactory.getCodecName(), coderFactory.getCoderName());
+      } else {
+        boolean hasConflict = false;
+        for (RawErasureCoderFactory coder : coders) {
+          if (coder.getCoderName().equals(coderFactory.getCoderName())) {
+            hasConflict = true;
+            LOG.error("Coder {} cannot be registered because its coder name " +
+                    "{} conflicts with {}",
+                coderFactory.getClass().getName(),
+                coderFactory.getCoderName(), coder.getClass().getName());
+            break;
+          }
+        }
+        if (!hasConflict) {
+          coders.add(coderFactory);
+          LOG.debug("Codec registered: codec = {}, coder = {}",
+              coderFactory.getCodecName(), coderFactory.getCoderName());
+        }
+      }
+    }
+
+    // update coderNameMap accordingly
+    coderNameMap.clear();
+    for (Map.Entry<String, List<RawErasureCoderFactory>> entry :
+        coderMap.entrySet()) {
+      String codecName = entry.getKey();
+      List<RawErasureCoderFactory> coders = entry.getValue();
+      coderNameMap.put(codecName, coders.stream().
+          map(RawErasureCoderFactory::getCoderName).
+          collect(Collectors.toList()).toArray(new String[0]));
+    }
+  }
+
+  /**
+   * Get all coder names of the given codec.
+   * @param codecName the name of the codec
+   * @return an array of all coder names, or null if none exist
+   */
+  public String[] getCoderNames(String codecName) {
+    String[] coderNames = coderNameMap.get(codecName);
+    return coderNames;
+  }
+
+  /**
+   * Get all coder factories of the given codec.
+   * @param codecName the name of the codec
+   * @return a list of all coder factories, or null if none exist
+   */
+  public List<RawErasureCoderFactory> getCoders(String codecName) {
+    List<RawErasureCoderFactory> coders = coderMap.get(codecName);
+    return coders;
+  }
+
+  /**
+   * Get all codec names.
+   * @return a set of all codec names
+   */
+  public Set<String> getCodecNames() {
+    return coderMap.keySet();
+  }
+
+  /**
+   * Get a specific coder factory defined by codec name and coder name.
+   * @param codecName name of the codec
+   * @param coderName name of the coder
+   * @return the specific coder factory, or null if it does not exist
+   */
+  public RawErasureCoderFactory getCoderByName(
+      String codecName, String coderName) {
+    List<RawErasureCoderFactory> coders = getCoders(codecName);
+
+    // find the RawErasureCoderFactory with the name of coderName
+    for (RawErasureCoderFactory coder : coders) {
+      if (coder.getCoderName().equals(coderName)) {
+        return coder;
+      }
+    }
+    return null;
+  }
+
+  public RawErasureCoderFactory getCodecFactory(String codecName) {
+    for (RawErasureCoderFactory factory : getCoders(codecName)) {
+      return factory;
+    }
+    throw new IllegalArgumentException("There is no registered codec " +
+        "factory for codec " + codecName);
+  }
+}
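
As a hedged sketch of how the registry above is typically consulted (the codec name "rs" is an assumption here; the actual names come from the META-INF/services entries added later in this commit):

    import java.util.Arrays;
    import org.apache.ozone.erasurecode.CodecRegistry;
    import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory;

    public class RegistryExample {
      public static void main(String[] args) {
        CodecRegistry registry = CodecRegistry.getInstance();
        // Factories are discovered via ServiceLoader in the private
        // constructor; each codec maps to one or more coder names.
        for (String codec : registry.getCodecNames()) {
          System.out.println(codec + " -> "
              + Arrays.toString(registry.getCoderNames(codec)));
        }
        // getCodecFactory returns the first registered factory for a
        // codec, or throws IllegalArgumentException if none exists.
        RawErasureCoderFactory factory = registry.getCodecFactory("rs");
        System.out.println(factory.getCoderName());
      }
    }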
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/ECChunk.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/ECChunk.java
new file mode 100644
index 0000000..1bf28b3
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/ECChunk.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A wrapper for a ByteBuffer or byte array for an erasure code chunk.
+ */
+public class ECChunk {
+
+  private ByteBuffer chunkBuffer;
+
+  // TODO: should be moved into a more general flags field
+  private boolean allZero = false;
+
+  /**
+   * Wrapping a ByteBuffer.
+   *
+   * @param buffer buffer to be wrapped by the chunk
+   */
+  public ECChunk(ByteBuffer buffer) {
+    this.chunkBuffer = buffer;
+  }
+
+  public ECChunk(ByteBuffer buffer, int offset, int len) {
+    ByteBuffer tmp = buffer.duplicate();
+    tmp.position(offset);
+    tmp.limit(offset + len);
+    this.chunkBuffer = tmp.slice();
+  }
+
+  /**
+   * Wrapping a byte array.
+   *
+   * @param buffer buffer to be wrapped by the chunk
+   */
+  public ECChunk(byte[] buffer) {
+    this.chunkBuffer = ByteBuffer.wrap(buffer);
+  }
+
+  public ECChunk(byte[] buffer, int offset, int len) {
+    this.chunkBuffer = ByteBuffer.wrap(buffer, offset, len);
+  }
+
+  public boolean isAllZero() {
+    return allZero;
+  }
+
+  public void setAllZero(boolean allZero) {
+    this.allZero = allZero;
+  }
+
+  /**
+   * Convert to ByteBuffer.
+   *
+   * @return ByteBuffer
+   */
+  public ByteBuffer getBuffer() {
+    return chunkBuffer;
+  }
+
+  /**
+   * Convert an array of chunks to an array of ByteBuffers.
+   *
+   * @param chunks chunks to convert into buffers
+   * @return an array of ByteBuffers
+   */
+  public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
+    ByteBuffer[] buffers = new ByteBuffer[chunks.length];
+
+    ECChunk chunk;
+    for (int i = 0; i < chunks.length; i++) {
+      chunk = chunks[i];
+      if (chunk == null) {
+        buffers[i] = null;
+      } else {
+        buffers[i] = chunk.getBuffer();
+      }
+    }
+
+    return buffers;
+  }
+
+  /**
+   * Convert to a byte array, just for test usage.
+   * @return bytes array
+   */
+  public byte[] toBytesArray() {
+    byte[] bytesArr = new byte[chunkBuffer.remaining()];
+    // Avoid affecting the original one
+    chunkBuffer.mark();
+    chunkBuffer.get(bytesArr);
+    chunkBuffer.reset();
+
+    return bytesArr;
+  }
+}
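
A short usage sketch for the chunk wrapper above (assuming the class compiles as shown; the example class is hypothetical):

    import java.nio.ByteBuffer;
    import org.apache.ozone.erasurecode.ECChunk;

    public class ECChunkExample {
      public static void main(String[] args) {
        byte[] data = {1, 2, 3, 4};
        // Wrapping a byte array: the chunk is a view over it, not a copy.
        ECChunk chunk = new ECChunk(data);
        // toBytesArray() uses mark()/reset(), so the buffer position is
        // unchanged after the call.
        System.out.println(chunk.toBytesArray().length); // prints: 4
        // toBuffers() tolerates null entries, mapping them to null buffers.
        ByteBuffer[] buffers = ECChunk.toBuffers(new ECChunk[] {chunk, null});
        System.out.println(buffers[1] == null); // prints: true
      }
    }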
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/package-info.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/package-info.java
new file mode 100644
index 0000000..fe69252
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/package-info.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *
+ * Erasure coding utilities for Apache Ozone.
+ *
+ * The initial implementation is imported from Apache Hadoop; see that repo
+ * for full history.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.ozone.erasurecode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/AbstractNativeRawDecoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/AbstractNativeRawDecoder.java
new file mode 100644
index 0000000..eb4feec
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/AbstractNativeRawDecoder.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Abstract native raw decoder for all native coders to extend.
+ */
+@InterfaceAudience.Private
+abstract class AbstractNativeRawDecoder extends RawErasureDecoder {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(AbstractNativeRawDecoder.class);
+
+  // Protect ISA-L coder data structure in native layer from being accessed and
+  // updated concurrently by the init, release and decode functions.
+  private final ReentrantReadWriteLock decoderLock =
+      new ReentrantReadWriteLock();
+
+  // To link with the underlying data structure in the native layer.
+  // No get/set as only used by native codes.
+  private long nativeCoder;
+
+  AbstractNativeRawDecoder(ECReplicationConfig replicationConfig) {
+    super(replicationConfig);
+  }
+
+  @Override
+  protected void doDecode(ByteBufferDecodingState decodingState)
+      throws IOException {
+    decoderLock.readLock().lock();
+    try {
+      if (nativeCoder == 0) {
+        throw new IOException(String.format("%s closed",
+            getClass().getSimpleName()));
+      }
+      int[] inputOffsets = new int[decodingState.inputs.length];
+      int[] outputOffsets = new int[decodingState.outputs.length];
+
+      ByteBuffer buffer;
+      for (int i = 0; i < decodingState.inputs.length; ++i) {
+        buffer = decodingState.inputs[i];
+        if (buffer != null) {
+          inputOffsets[i] = buffer.position();
+        }
+      }
+
+      for (int i = 0; i < decodingState.outputs.length; ++i) {
+        buffer = decodingState.outputs[i];
+        outputOffsets[i] = buffer.position();
+      }
+
+      performDecodeImpl(decodingState.inputs, inputOffsets,
+          decodingState.decodeLength, decodingState.erasedIndexes,
+          decodingState.outputs, outputOffsets);
+    } finally {
+      decoderLock.readLock().unlock();
+    }
+  }
+
+  protected abstract void performDecodeImpl(ByteBuffer[] inputs,
+      int[] inputOffsets, int dataLen,
+      int[] erased, ByteBuffer[] outputs,
+      int[] outputOffsets)
+      throws IOException;
+
+  @Override
+  protected void doDecode(ByteArrayDecodingState decodingState)
+      throws IOException {
+    PerformanceAdvisory.LOG.debug("convertToByteBufferState is invoked, " +
+        "which is not efficient. Please use direct ByteBuffer inputs/outputs");
+
+    ByteBufferDecodingState bbdState = decodingState.convertToByteBufferState();
+    doDecode(bbdState);
+
+    for (int i = 0; i < decodingState.outputs.length; i++) {
+      bbdState.outputs[i].get(decodingState.outputs[i],
+          decodingState.outputOffsets[i], decodingState.decodeLength);
+    }
+  }
+
+  @Override
+  public boolean preferDirectBuffer() {
+    return true;
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/AbstractNativeRawEncoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/AbstractNativeRawEncoder.java
new file mode 100644
index 0000000..365e6db
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/AbstractNativeRawEncoder.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.util.PerformanceAdvisory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Abstract native raw encoder for all native coders to extend.
+ */
+@InterfaceAudience.Private
+@SuppressWarnings("checkstyle:VisibilityModifier")
+abstract class AbstractNativeRawEncoder extends RawErasureEncoder {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(AbstractNativeRawEncoder.class);
+
+  // Protect ISA-L coder data structure in native layer from being accessed and
+  // updated concurrently by the init, release and encode functions.
+  protected final ReentrantReadWriteLock encoderLock =
+      new ReentrantReadWriteLock();
+
+  AbstractNativeRawEncoder(ECReplicationConfig replicationConfig) {
+    super(replicationConfig);
+  }
+
+  @Override
+  protected void doEncode(ByteBufferEncodingState encodingState)
+      throws IOException {
+    encoderLock.readLock().lock();
+    try {
+      if (nativeCoder == 0) {
+        throw new IOException(String.format("%s closed",
+            getClass().getSimpleName()));
+      }
+      int[] inputOffsets = new int[encodingState.inputs.length];
+      int[] outputOffsets = new int[encodingState.outputs.length];
+      int dataLen = encodingState.inputs[0].remaining();
+
+      ByteBuffer buffer;
+      for (int i = 0; i < encodingState.inputs.length; ++i) {
+        buffer = encodingState.inputs[i];
+        inputOffsets[i] = buffer.position();
+      }
+
+      for (int i = 0; i < encodingState.outputs.length; ++i) {
+        buffer = encodingState.outputs[i];
+        outputOffsets[i] = buffer.position();
+      }
+
+      performEncodeImpl(encodingState.inputs, inputOffsets, dataLen,
+          encodingState.outputs, outputOffsets);
+    } finally {
+      encoderLock.readLock().unlock();
+    }
+  }
+
+  protected abstract void performEncodeImpl(
+          ByteBuffer[] inputs, int[] inputOffsets,
+          int dataLen, ByteBuffer[] outputs, int[] outputOffsets)
+      throws IOException;
+
+  @Override
+  protected void doEncode(ByteArrayEncodingState encodingState)
+      throws IOException {
+    PerformanceAdvisory.LOG.debug("convertToByteBufferState is invoked, " +
+        "which is not efficient. Please use direct ByteBuffer inputs/outputs");
+
+    ByteBufferEncodingState bbeState = encodingState.convertToByteBufferState();
+    doEncode(bbeState);
+
+    for (int i = 0; i < encodingState.outputs.length; i++) {
+      bbeState.outputs[i].get(encodingState.outputs[i],
+          encodingState.outputOffsets[i], encodingState.encodeLength);
+    }
+  }
+
+  @Override
+  public boolean preferDirectBuffer() {
+    return true;
+  }
+
+  // To link with the underlying data structure in the native layer.
+  // No get/set as only used by native codes.
+  private long nativeCoder;
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteArrayDecodingState.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteArrayDecodingState.java
new file mode 100644
index 0000000..3053539
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteArrayDecodingState.java
@@ -0,0 +1,132 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+
+import java.nio.ByteBuffer;
+
+/**
+ * A utility class that maintains decoding state during a decode call using
+ * byte array inputs.
+ */
+@SuppressWarnings("checkstyle:VisibilityModifier")
+class ByteArrayDecodingState extends DecodingState {
+  byte[][] inputs;
+  int[] inputOffsets;
+  int[] erasedIndexes;
+  byte[][] outputs;
+  int[] outputOffsets;
+
+  ByteArrayDecodingState(RawErasureDecoder decoder, byte[][] inputs,
+                         int[] erasedIndexes, byte[][] outputs) {
+    this.decoder = decoder;
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.erasedIndexes = erasedIndexes;
+    byte[] validInput = CoderUtil.findFirstValidInput(inputs);
+    this.decodeLength = validInput.length;
+
+    checkParameters(inputs, erasedIndexes, outputs);
+    checkInputBuffers(inputs);
+    checkOutputBuffers(outputs);
+
+    this.inputOffsets = new int[inputs.length]; // ALL ZERO
+    this.outputOffsets = new int[outputs.length]; // ALL ZERO
+  }
+
+  ByteArrayDecodingState(RawErasureDecoder decoder,
+                         int decodeLength,
+                         int[] erasedIndexes,
+                         byte[][] inputs,
+                         int[] inputOffsets,
+                         byte[][] outputs,
+                         int[] outputOffsets) {
+    this.decoder = decoder;
+    this.decodeLength = decodeLength;
+    this.erasedIndexes = erasedIndexes;
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.inputOffsets = inputOffsets;
+    this.outputOffsets = outputOffsets;
+  }
+
+  /**
+   * Convert to a ByteBufferDecodingState backed by direct ByteBuffers.
+   */
+  ByteBufferDecodingState convertToByteBufferState() {
+    ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
+    ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];
+
+    for (int i = 0; i < inputs.length; i++) {
+      newInputs[i] = CoderUtil.cloneAsDirectByteBuffer(inputs[i],
+          inputOffsets[i], decodeLength);
+    }
+
+    for (int i = 0; i < outputs.length; i++) {
+      newOutputs[i] = ByteBuffer.allocateDirect(decodeLength);
+    }
+
+    ByteBufferDecodingState bbdState = new ByteBufferDecodingState(decoder,
+        decodeLength, erasedIndexes, newInputs, newOutputs);
+    return bbdState;
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length.
+   * @param buffers the buffers to check
+   */
+  void checkInputBuffers(byte[][] buffers) {
+    int validInputs = 0;
+
+    for (byte[] buffer : buffers) {
+      if (buffer == null) {
+        continue;
+      }
+
+      if (buffer.length != decodeLength) {
+        throw new IllegalArgumentException(
+            "Invalid buffer, not of length " + decodeLength);
+      }
+
+      validInputs++;
+    }
+
+    if (validInputs < decoder.getNumDataUnits()) {
+      throw new IllegalArgumentException(
+          "No enough valid inputs are provided, not recoverable");
+    }
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length.
+   * @param buffers the buffers to check
+   */
+  void checkOutputBuffers(byte[][] buffers) {
+    for (byte[] buffer : buffers) {
+      if (buffer == null) {
+        throw new IllegalArgumentException(
+            "Invalid buffer found, not allowing null");
+      }
+
+      if (buffer.length != decodeLength) {
+        throw new IllegalArgumentException(
+            "Invalid buffer not of length " + decodeLength);
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteArrayEncodingState.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteArrayEncodingState.java
new file mode 100644
index 0000000..0e0def8
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteArrayEncodingState.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A utility class that maintains encoding state during an encode call using
+ * byte array inputs.
+ */
+@InterfaceAudience.Private
+@SuppressWarnings("checkstyle:VisibilityModifier")
+class ByteArrayEncodingState extends EncodingState {
+  byte[][] inputs;
+  byte[][] outputs;
+  int[] inputOffsets;
+  int[] outputOffsets;
+
+  ByteArrayEncodingState(RawErasureEncoder encoder,
+                         byte[][] inputs, byte[][] outputs) {
+    this.encoder = encoder;
+    byte[] validInput = CoderUtil.findFirstValidInput(inputs);
+    this.encodeLength = validInput.length;
+    this.inputs = inputs;
+    this.outputs = outputs;
+
+    checkParameters(inputs, outputs);
+    checkBuffers(inputs);
+    checkBuffers(outputs);
+
+    this.inputOffsets = new int[inputs.length]; // ALL ZERO
+    this.outputOffsets = new int[outputs.length]; // ALL ZERO
+  }
+
+  ByteArrayEncodingState(RawErasureEncoder encoder,
+                         int encodeLength,
+                         byte[][] inputs,
+                         int[] inputOffsets,
+                         byte[][] outputs,
+                         int[] outputOffsets) {
+    this.encoder = encoder;
+    this.encodeLength = encodeLength;
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.inputOffsets = inputOffsets;
+    this.outputOffsets = outputOffsets;
+  }
+
+  /**
+   * Convert to a ByteBufferEncodingState backed by direct ByteBuffers.
+   */
+  ByteBufferEncodingState convertToByteBufferState() {
+    ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
+    ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];
+
+    for (int i = 0; i < inputs.length; i++) {
+      newInputs[i] = CoderUtil.cloneAsDirectByteBuffer(inputs[i],
+          inputOffsets[i], encodeLength);
+    }
+
+    for (int i = 0; i < outputs.length; i++) {
+      newOutputs[i] = ByteBuffer.allocateDirect(encodeLength);
+    }
+
+    ByteBufferEncodingState bbeState = new ByteBufferEncodingState(encoder,
+        encodeLength, newInputs, newOutputs);
+    return bbeState;
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length.
+   * @param buffers the buffers to check
+   */
+  void checkBuffers(byte[][] buffers) {
+    for (byte[] buffer : buffers) {
+      if (buffer == null) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer found, not allowing null");
+      }
+
+      if (buffer.length != encodeLength) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer not of length " + encodeLength);
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteBufferDecodingState.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteBufferDecodingState.java
new file mode 100644
index 0000000..7b9ebf6
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteBufferDecodingState.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A utility class that maintains decoding state during a decode call using
+ * ByteBuffer inputs.
+ */
+@SuppressWarnings("checkstyle:VisibilityModifier")
+class ByteBufferDecodingState extends DecodingState {
+  ByteBuffer[] inputs;
+  ByteBuffer[] outputs;
+  int[] erasedIndexes;
+  boolean usingDirectBuffer;
+
+  ByteBufferDecodingState(RawErasureDecoder decoder, ByteBuffer[] inputs,
+                          int[] erasedIndexes, ByteBuffer[] outputs) {
+    this.decoder = decoder;
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.erasedIndexes = erasedIndexes;
+    ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
+    this.decodeLength = validInput.remaining();
+    this.usingDirectBuffer = validInput.isDirect();
+
+    checkParameters(inputs, erasedIndexes, outputs);
+    checkInputBuffers(inputs);
+    checkOutputBuffers(outputs);
+  }
+
+  ByteBufferDecodingState(RawErasureDecoder decoder,
+                          int decodeLength,
+                          int[] erasedIndexes,
+                          ByteBuffer[] inputs,
+                          ByteBuffer[] outputs) {
+    this.decoder = decoder;
+    this.decodeLength = decodeLength;
+    this.erasedIndexes = erasedIndexes;
+    this.inputs = inputs;
+    this.outputs = outputs;
+  }
+
+  /**
+   * Convert to a ByteArrayDecodingState when it's backed by on-heap arrays.
+   */
+  ByteArrayDecodingState convertToByteArrayState() {
+    int[] inputOffsets = new int[inputs.length];
+    int[] outputOffsets = new int[outputs.length];
+    byte[][] newInputs = new byte[inputs.length][];
+    byte[][] newOutputs = new byte[outputs.length][];
+
+    ByteBuffer buffer;
+    for (int i = 0; i < inputs.length; ++i) {
+      buffer = inputs[i];
+      if (buffer != null) {
+        inputOffsets[i] = buffer.arrayOffset() + buffer.position();
+        newInputs[i] = buffer.array();
+      }
+    }
+
+    for (int i = 0; i < outputs.length; ++i) {
+      buffer = outputs[i];
+      outputOffsets[i] = buffer.arrayOffset() + buffer.position();
+      newOutputs[i] = buffer.array();
+    }
+
+    ByteArrayDecodingState baeState = new ByteArrayDecodingState(decoder,
+        decodeLength, erasedIndexes, newInputs,
+        inputOffsets, newOutputs, outputOffsets);
+    return baeState;
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length and type, direct
+   * buffers or not.
+   * @param buffers the buffers to check
+   */
+  void checkInputBuffers(ByteBuffer[] buffers) {
+    int validInputs = 0;
+
+    for (ByteBuffer buffer : buffers) {
+      if (buffer == null) {
+        continue;
+      }
+
+      if (buffer.remaining() != decodeLength) {
+        throw new IllegalArgumentException(
+            "Invalid buffer, not of length " + decodeLength);
+      }
+      if (buffer.isDirect() != usingDirectBuffer) {
+        throw new IllegalArgumentException(
+            "Invalid buffer, isDirect should be " + usingDirectBuffer);
+      }
+
+      validInputs++;
+    }
+
+    if (validInputs < decoder.getNumDataUnits()) {
+      throw new IllegalArgumentException(
+          "No enough valid inputs are provided, not recoverable");
+    }
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length and type, direct
+   * buffers or not.
+   * @param buffers the buffers to check
+   */
+  void checkOutputBuffers(ByteBuffer[] buffers) {
+    for (ByteBuffer buffer : buffers) {
+      if (buffer == null) {
+        throw new IllegalArgumentException(
+            "Invalid buffer found, not allowing null");
+      }
+
+      if (buffer.remaining() != decodeLength) {
+        throw new IllegalArgumentException(
+            "Invalid buffer, not of length " + decodeLength);
+      }
+      if (buffer.isDirect() != usingDirectBuffer) {
+        throw new IllegalArgumentException(
+            "Invalid buffer, isDirect should be " + usingDirectBuffer);
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteBufferEncodingState.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteBufferEncodingState.java
new file mode 100644
index 0000000..a8ffec1
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/ByteBufferEncodingState.java
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A utility class that maintains encoding state during an encode call using
+ * ByteBuffer inputs.
+ */
+@InterfaceAudience.Private
+@SuppressWarnings("checkstyle:VisibilityModifier")
+class ByteBufferEncodingState extends EncodingState {
+  ByteBuffer[] inputs;
+  ByteBuffer[] outputs;
+  boolean usingDirectBuffer;
+
+  ByteBufferEncodingState(RawErasureEncoder encoder,
+                          ByteBuffer[] inputs, ByteBuffer[] outputs) {
+    this.encoder = encoder;
+    ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
+    this.encodeLength = validInput.remaining();
+    this.usingDirectBuffer = validInput.isDirect();
+    this.inputs = inputs;
+    this.outputs = outputs;
+
+    checkParameters(inputs, outputs);
+    checkBuffers(inputs);
+    checkBuffers(outputs);
+  }
+
+  ByteBufferEncodingState(RawErasureEncoder encoder,
+                          int encodeLength,
+                          ByteBuffer[] inputs,
+                          ByteBuffer[] outputs) {
+    this.encoder = encoder;
+    this.encodeLength = encodeLength;
+    this.inputs = inputs;
+    this.outputs = outputs;
+  }
+
+  /**
+   * Convert to a ByteArrayEncodingState when it's backed by on-heap arrays.
+   */
+  ByteArrayEncodingState convertToByteArrayState() {
+    int[] inputOffsets = new int[inputs.length];
+    int[] outputOffsets = new int[outputs.length];
+    byte[][] newInputs = new byte[inputs.length][];
+    byte[][] newOutputs = new byte[outputs.length][];
+
+    ByteBuffer buffer;
+    for (int i = 0; i < inputs.length; ++i) {
+      buffer = inputs[i];
+      inputOffsets[i] = buffer.arrayOffset() + buffer.position();
+      newInputs[i] = buffer.array();
+    }
+
+    for (int i = 0; i < outputs.length; ++i) {
+      buffer = outputs[i];
+      outputOffsets[i] = buffer.arrayOffset() + buffer.position();
+      newOutputs[i] = buffer.array();
+    }
+
+    ByteArrayEncodingState baeState = new ByteArrayEncodingState(encoder,
+        encodeLength, newInputs, inputOffsets, newOutputs, outputOffsets);
+    return baeState;
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length and type, direct
+   * buffers or not.
+   * @param buffers the buffers to check
+   */
+  void checkBuffers(ByteBuffer[] buffers) {
+    for (ByteBuffer buffer : buffers) {
+      if (buffer == null) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer found, not allowing null");
+      }
+
+      if (buffer.remaining() != encodeLength) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, not of length " + encodeLength);
+      }
+      if (buffer.isDirect() != usingDirectBuffer) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, isDirect should be " + usingDirectBuffer);
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java
new file mode 100644
index 0000000..04bb980
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/CoderUtil.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.ozone.erasurecode.ECChunk;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Helpful utilities for implementing some raw erasure coders.
+ */
+public final class CoderUtil {
+
+  private CoderUtil() {
+    // Never called; utility class
+  }
+
+  private static byte[] emptyChunk = new byte[4096];
+
+  /**
+   * Return an empty chunk buffer of at least the desired length.
+   * @param leastLength the minimum length required
+   * @return empty chunk of zero bytes
+   */
+  static byte[] getEmptyChunk(int leastLength) {
+    if (emptyChunk.length >= leastLength) {
+      return emptyChunk; // In most time
+    }
+
+    synchronized (CoderUtil.class) {
+      emptyChunk = new byte[leastLength];
+    }
+
+    return emptyChunk;
+  }
+
+  /**
+   * Fill a buffer with ZERO bytes from the current readable/writable
+   * position, over len bytes.
+   * @param buffer a buffer ready to read / write a certain number of bytes
+   * @return the buffer itself, with ZERO bytes written; the position and limit
+   *         are not changed after the call
+   */
+  static ByteBuffer resetBuffer(ByteBuffer buffer, int len) {
+    int pos = buffer.position();
+    buffer.put(getEmptyChunk(len), 0, len);
+    buffer.position(pos);
+
+    return buffer;
+  }
+
+  /**
+   * Fill the buffer (either input or output) with ZERO bytes over the
+   * specified length len, starting at the given offset.
+   * @param buffer byte array buffer
+   * @return the buffer itself
+   */
+  static byte[] resetBuffer(byte[] buffer, int offset, int len) {
+    byte[] empty = getEmptyChunk(len);
+    System.arraycopy(empty, 0, buffer, offset, len);
+
+    return buffer;
+  }
+
+  /**
+   * Initialize the output buffers with ZERO bytes.
+   */
+  static void resetOutputBuffers(ByteBuffer[] buffers, int dataLen) {
+    for (ByteBuffer buffer : buffers) {
+      resetBuffer(buffer, dataLen);
+    }
+  }
+
+  /**
+   * Initialize the output buffers with ZERO bytes.
+   */
+  static void resetOutputBuffers(byte[][] buffers, int[] offsets,
+                                 int dataLen) {
+    for (int i = 0; i < buffers.length; i++) {
+      resetBuffer(buffers[i], offsets[i], dataLen);
+    }
+  }
+
+
+  /**
+   * Convert an array of chunks to an array of ByteBuffers.
+   *
+   * @param chunks chunks to convert into buffers
+   * @return an array of ByteBuffers
+   */
+  static ByteBuffer[] toBuffers(ECChunk[] chunks) {
+    ByteBuffer[] buffers = new ByteBuffer[chunks.length];
+
+    ECChunk chunk;
+    for (int i = 0; i < chunks.length; i++) {
+      chunk = chunks[i];
+      if (chunk == null) {
+        buffers[i] = null;
+      } else {
+        buffers[i] = chunk.getBuffer();
+        if (chunk.isAllZero()) {
+          CoderUtil.resetBuffer(buffers[i], buffers[i].remaining());
+        }
+      }
+    }
+
+    return buffers;
+  }
+
+  /**
+   * Clone an input bytes array as direct ByteBuffer.
+   */
+  static ByteBuffer cloneAsDirectByteBuffer(byte[] input, int offset, int len) {
+    if (input == null) { // an input can be null if erased or not to be read
+      return null;
+    }
+
+    ByteBuffer directBuffer = ByteBuffer.allocateDirect(len);
+    directBuffer.put(input, offset, len);
+    directBuffer.flip();
+    return directBuffer;
+  }
+
+
+
+  /**
+   * Find the first valid input among all the inputs.
+   * @param inputs input buffers to look for valid input
+   * @return the first valid input
+   */
+  static <T> T findFirstValidInput(T[] inputs) {
+    for (T input : inputs) {
+      if (input != null) {
+        return input;
+      }
+    }
+
+    throw new IllegalArgumentException(
+        "Invalid inputs are found, all being null");
+  }
+
+  /**
+   * Pick up the indexes of the valid (non-null) inputs.
+   * @param inputs decoding input buffers
+   * @param <T> the type of the input buffers
+   */
+  static <T> int[] getValidIndexes(T[] inputs) {
+    int[] validIndexes = new int[inputs.length];
+    int idx = 0;
+    for (int i = 0; i < inputs.length; i++) {
+      if (inputs[i] != null) {
+        validIndexes[idx++] = i;
+      }
+    }
+
+    return Arrays.copyOf(validIndexes, idx);
+  }
+}
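
To make the two lookup helpers above concrete, a minimal sketch (both methods are package-private, so this hypothetical caller must live in the same package):

    package org.apache.ozone.erasurecode.rawcoder;

    import java.util.Arrays;

    public class CoderUtilExample {
      public static void main(String[] args) {
        // Simulate a decode call where units 1 and 3 are erased (null).
        byte[][] inputs = {new byte[4], null, new byte[4], null, new byte[4]};
        // findFirstValidInput returns inputs[0]; getValidIndexes skips nulls.
        byte[] first = CoderUtil.findFirstValidInput(inputs);
        int[] valid = CoderUtil.getValidIndexes(inputs);
        System.out.println(first.length + " " + Arrays.toString(valid));
        // prints: 4 [0, 2, 4]
      }
    }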
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DecodingState.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DecodingState.java
new file mode 100644
index 0000000..7a407b8
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DecodingState.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+/**
+ * A utility class that maintains decoding state during a decode call.
+ */
+@SuppressWarnings("checkstyle:VisibilityModifier")
+class DecodingState {
+  RawErasureDecoder decoder;
+  int decodeLength;
+
+  /**
+   * Check and validate decoding parameters, throwing an exception on failure.
+   * The checking assumes an MDS code; other codes can override this.
+   * @param inputs input buffers to check
+   * @param erasedIndexes indexes of erased units in the inputs array
+   * @param outputs output buffers to check
+   */
+  <T> void checkParameters(T[] inputs, int[] erasedIndexes,
+                           T[] outputs) {
+    if (inputs.length != decoder.getNumParityUnits() +
+        decoder.getNumDataUnits()) {
+      throw new IllegalArgumentException("Invalid inputs length");
+    }
+
+    if (erasedIndexes.length != outputs.length) {
+      throw new IllegalArgumentException(
+          "erasedIndexes and outputs mismatch in length");
+    }
+
+    if (erasedIndexes.length > decoder.getNumParityUnits()) {
+      throw new IllegalArgumentException(
+          "Too many erased, not recoverable");
+    }
+  }
+}
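
For concreteness, with a hypothetical RS(6,3) decoder the checks above require inputs.length == 6 + 3 == 9, erasedIndexes.length == outputs.length, and erasedIndexes.length <= 3: an MDS code with 3 parity units can recover at most 3 erased units, so a fourth erasure is rejected as unrecoverable.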
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawDecoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawDecoder.java
new file mode 100644
index 0000000..4a11dbc
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawDecoder.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+
+/**
+ * A dummy raw decoder that does no real computation.
+ * Instead, it just returns zero bytes.
+ * This decoder can be used to isolate performance issues to the HDFS-side
+ * logic rather than the codec, and is intended for testing only.
+ */
+@InterfaceAudience.Private
+public class DummyRawDecoder extends RawErasureDecoder {
+
+  public DummyRawDecoder(ECReplicationConfig ecReplicationConfig) {
+    super(ecReplicationConfig);
+  }
+
+  @Override
+  protected void doDecode(ByteBufferDecodingState decodingState) {
+    // Nothing to do. Output buffers have already been reset
+  }
+
+  @Override
+  protected void doDecode(ByteArrayDecodingState decodingState) {
+    // Nothing to do. Output buffers have already been reset
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawEncoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawEncoder.java
new file mode 100644
index 0000000..9b178e6
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawEncoder.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+
+/**
+ * A dummy raw encoder that does no real computation.
+ * Instead, it just returns zero bytes.
+ * This encoder can be used to isolate performance issues to the HDFS-side
+ * logic rather than the codec, and is intended for testing only.
+ */
+@InterfaceAudience.Private
+public class DummyRawEncoder extends RawErasureEncoder {
+
+  public DummyRawEncoder(ECReplicationConfig ecReplicationConfig) {
+    super(ecReplicationConfig);
+  }
+
+  @Override
+  protected void doEncode(ByteArrayEncodingState encodingState) {
+    // Nothing to do. Output buffers have already been reset
+  }
+
+  @Override
+  protected void doEncode(ByteBufferEncodingState encodingState) {
+    // Nothing to do. Output buffers have already been reset
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawErasureCoderFactory.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawErasureCoderFactory.java
new file mode 100644
index 0000000..bf74871
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/DummyRawErasureCoderFactory.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+
+/**
+ * A raw erasure coder factory for dummy raw coders.
+ */
+@InterfaceAudience.Private
+public class DummyRawErasureCoderFactory implements RawErasureCoderFactory {
+  public static final String CODER_NAME = "dummy_dummy";
+  public static final String DUMMY_CODEC_NAME = "dummy";
+
+  @Override
+  public RawErasureEncoder createEncoder(
+      ECReplicationConfig ecReplicationConfig) {
+    return new DummyRawEncoder(ecReplicationConfig);
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder(
+      ECReplicationConfig ecReplicationConfig) {
+    return new DummyRawDecoder(ecReplicationConfig);
+  }
+
+  @Override
+  public String getCoderName() {
+    return CODER_NAME;
+  }
+
+  @Override
+  public String getCodecName() {
+    return DUMMY_CODEC_NAME;
+  }
+}
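
As a sketch of the intended test usage, a benchmark can swap in the dummy
coders so that the measured time excludes the codec math (the two-argument
ECReplicationConfig constructor is an assumption here, and encode() may
throw IOException):

    // Any time measured around this call is spent in the surrounding I/O
    // path, since the dummy encoder leaves the parity outputs as zero bytes.
    RawErasureCoderFactory factory = new DummyRawErasureCoderFactory();
    RawErasureEncoder encoder =
        factory.createEncoder(new ECReplicationConfig(6, 3));
    byte[][] data = new byte[6][1024];    // 6 data units
    byte[][] parity = new byte[3][1024];  // 3 parity units, left all-zero
    encoder.encode(data, parity);
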
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/EncodingState.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/EncodingState.java
new file mode 100644
index 0000000..2339342
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/EncodingState.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A utility class that maintains encoding state during an encode call.
+ */
+@InterfaceAudience.Private
+@SuppressWarnings("checkstyle:VisibilityModifier")
+abstract class EncodingState {
+  RawErasureEncoder encoder;
+  int encodeLength;
+
+  /**
+   * Check and validate encoding parameters, throwing an exception accordingly.
+   * @param inputs input buffers to check
+   * @param outputs output buffers to check
+   */
+  <T> void checkParameters(T[] inputs, T[] outputs) {
+    if (inputs.length != encoder.getNumDataUnits()) {
+      throw new HadoopIllegalArgumentException("Invalid inputs length "
+          + inputs.length + " != " + encoder.getNumDataUnits());
+    }
+    if (outputs.length != encoder.getNumParityUnits()) {
+      throw new HadoopIllegalArgumentException("Invalid outputs length "
+          + outputs.length + " != " + encoder.getNumParityUnits());
+    }
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawDecoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawDecoder.java
new file mode 100644
index 0000000..a61e1ea
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawDecoder.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.ozone.erasurecode.rawcoder.util.DumpUtil;
+import org.apache.ozone.erasurecode.rawcoder.util.GF256;
+import org.apache.ozone.erasurecode.rawcoder.util.RSUtil;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * A raw erasure decoder in the RS code scheme, in pure Java, for use when a
+ * native implementation isn't available in some environments. Please always
+ * use native implementations when possible. This Java coder is about 5X
+ * faster than the one that originated from HDFS-RAID, and is also compatible
+ * with the native/ISA-L coder.
+ */
+@InterfaceAudience.Private
+public class RSRawDecoder extends RawErasureDecoder {
+  //relevant to schema and won't change during decode calls
+  private byte[] encodeMatrix;
+
+  /**
+   * Below are relevant to schema and erased indexes, thus may change during
+   * decode calls.
+   */
+  private byte[] decodeMatrix;
+  private byte[] invertMatrix;
+  /**
+   * Array of input tables previously generated from the coding coefficients.
+   * Must be of size 32*k*rows.
+   */
+  private byte[] gfTables;
+  private int[] cachedErasedIndexes;
+  private int[] validIndexes;
+  private int numErasedDataUnits;
+  private boolean[] erasureFlags;
+
+  public RSRawDecoder(ECReplicationConfig ecReplicationConfig) {
+    super(ecReplicationConfig);
+
+    int numAllUnits = getNumAllUnits();
+    if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
+      throw new HadoopIllegalArgumentException(
+              "Invalid getNumDataUnits() and numParityUnits");
+    }
+
+    encodeMatrix = new byte[numAllUnits * getNumDataUnits()];
+    RSUtil.genCauchyMatrix(encodeMatrix, numAllUnits, getNumDataUnits());
+    if (allowVerboseDump()) {
+      DumpUtil.dumpMatrix(encodeMatrix, getNumDataUnits(), numAllUnits);
+    }
+  }
+
+  @Override
+  protected void doDecode(ByteBufferDecodingState decodingState) {
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.decodeLength);
+    prepareDecoding(decodingState.inputs, decodingState.erasedIndexes);
+
+    ByteBuffer[] realInputs = new ByteBuffer[getNumDataUnits()];
+    for (int i = 0; i < getNumDataUnits(); i++) {
+      realInputs[i] = decodingState.inputs[validIndexes[i]];
+    }
+    RSUtil.encodeData(gfTables, realInputs, decodingState.outputs);
+  }
+
+  @Override
+  protected void doDecode(ByteArrayDecodingState decodingState) {
+    int dataLen = decodingState.decodeLength;
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.outputOffsets, dataLen);
+    prepareDecoding(decodingState.inputs, decodingState.erasedIndexes);
+
+    byte[][] realInputs = new byte[getNumDataUnits()][];
+    int[] realInputOffsets = new int[getNumDataUnits()];
+    for (int i = 0; i < getNumDataUnits(); i++) {
+      realInputs[i] = decodingState.inputs[validIndexes[i]];
+      realInputOffsets[i] = decodingState.inputOffsets[validIndexes[i]];
+    }
+    RSUtil.encodeData(gfTables, dataLen, realInputs, realInputOffsets,
+        decodingState.outputs, decodingState.outputOffsets);
+  }
+
+  private <T> void prepareDecoding(T[] inputs, int[] erasedIndexes) {
+    int[] tmpValidIndexes = CoderUtil.getValidIndexes(inputs);
+    if (Arrays.equals(this.cachedErasedIndexes, erasedIndexes) &&
+        Arrays.equals(this.validIndexes, tmpValidIndexes)) {
+      return; // Optimization. Nothing to do
+    }
+    this.cachedErasedIndexes =
+            Arrays.copyOf(erasedIndexes, erasedIndexes.length);
+    this.validIndexes =
+            Arrays.copyOf(tmpValidIndexes, tmpValidIndexes.length);
+
+    processErasures(erasedIndexes);
+  }
+
+  private void processErasures(int[] erasedIndexes) {
+    this.decodeMatrix = new byte[getNumAllUnits() * getNumDataUnits()];
+    this.invertMatrix = new byte[getNumAllUnits() * getNumDataUnits()];
+    this.gfTables = new byte[getNumAllUnits() * getNumDataUnits() * 32];
+
+    this.erasureFlags = new boolean[getNumAllUnits()];
+    this.numErasedDataUnits = 0;
+
+    for (int i = 0; i < erasedIndexes.length; i++) {
+      int index = erasedIndexes[i];
+      erasureFlags[index] = true;
+      if (index < getNumDataUnits()) {
+        numErasedDataUnits++;
+      }
+    }
+
+    generateDecodeMatrix(erasedIndexes);
+
+    RSUtil.initTables(getNumDataUnits(), erasedIndexes.length,
+        decodeMatrix, 0, gfTables);
+    if (allowVerboseDump()) {
+      System.out.println(DumpUtil.bytesToHex(gfTables, -1));
+    }
+  }
+
+  // Generate decode matrix from encode matrix
+  private void generateDecodeMatrix(int[] erasedIndexes) {
+    int i, j, r, p;
+    byte s;
+    byte[] tmpMatrix = new byte[getNumAllUnits() * getNumDataUnits()];
+
+    // Construct matrix tmpMatrix by removing error rows
+    for (i = 0; i < getNumDataUnits(); i++) {
+      r = validIndexes[i];
+      for (j = 0; j < getNumDataUnits(); j++) {
+        tmpMatrix[getNumDataUnits() * i + j] =
+                encodeMatrix[getNumDataUnits() * r + j];
+      }
+    }
+
+    GF256.gfInvertMatrix(tmpMatrix, invertMatrix, getNumDataUnits());
+
+    for (i = 0; i < numErasedDataUnits; i++) {
+      for (j = 0; j < getNumDataUnits(); j++) {
+        decodeMatrix[getNumDataUnits() * i + j] =
+                invertMatrix[getNumDataUnits() * erasedIndexes[i] + j];
+      }
+    }
+
+    for (p = numErasedDataUnits; p < erasedIndexes.length; p++) {
+      for (i = 0; i < getNumDataUnits(); i++) {
+        s = 0;
+        for (j = 0; j < getNumDataUnits(); j++) {
+          s ^= GF256.gfMul(invertMatrix[j * getNumDataUnits() + i],
+                  encodeMatrix[getNumDataUnits() * erasedIndexes[p] + j]);
+        }
+        decodeMatrix[getNumDataUnits() * p + i] = s;
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawEncoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawEncoder.java
new file mode 100644
index 0000000..086b1c5
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawEncoder.java
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.ozone.erasurecode.rawcoder.util.DumpUtil;
+import org.apache.ozone.erasurecode.rawcoder.util.RSUtil;
+
+/**
+ * A raw erasure encoder in the RS code scheme, in pure Java, for use when a
+ * native implementation isn't available in some environments. Please always
+ * use native implementations when possible. This Java coder is about 5X
+ * faster than the one that originated from HDFS-RAID, and is also compatible
+ * with the native/ISA-L coder.
+ */
+public class RSRawEncoder extends RawErasureEncoder {
+  // relevant to schema and won't change during encode calls.
+  private byte[] encodeMatrix;
+  /**
+   * Array of input tables previously generated from the coding coefficients.
+   * Must be of size 32*k*rows.
+   */
+  private byte[] gfTables;
+
+  public RSRawEncoder(ECReplicationConfig ecReplicationConfig) {
+    super(ecReplicationConfig);
+
+    if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
+      throw new IllegalArgumentException(
+          "Invalid numDataUnits and numParityUnits");
+    }
+
+    encodeMatrix = new byte[getNumAllUnits() * getNumDataUnits()];
+    RSUtil.genCauchyMatrix(encodeMatrix, getNumAllUnits(), getNumDataUnits());
+    if (allowVerboseDump()) {
+      DumpUtil.dumpMatrix(encodeMatrix, getNumDataUnits(), getNumAllUnits());
+    }
+    gfTables = new byte[getNumAllUnits() * getNumDataUnits() * 32];
+    RSUtil.initTables(getNumDataUnits(), getNumParityUnits(), encodeMatrix,
+        getNumDataUnits() * getNumDataUnits(), gfTables);
+    if (allowVerboseDump()) {
+      System.out.println(DumpUtil.bytesToHex(gfTables, -1));
+    }
+  }
+
+  @Override
+  protected void doEncode(ByteBufferEncodingState encodingState) {
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.encodeLength);
+    RSUtil.encodeData(gfTables, encodingState.inputs, encodingState.outputs);
+  }
+
+  @Override
+  protected void doEncode(ByteArrayEncodingState encodingState) {
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.outputOffsets,
+        encodingState.encodeLength);
+    RSUtil.encodeData(gfTables, encodingState.encodeLength,
+        encodingState.inputs,
+        encodingState.inputOffsets, encodingState.outputs,
+        encodingState.outputOffsets);
+  }
+}
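
A minimal round-trip sketch with the two RS coders above, assuming an
RS(3, 2) ECReplicationConfig (constructor shape assumed) and equal-length
units; encode() and decode() may throw IOException:

    ECReplicationConfig rs32 = new ECReplicationConfig(3, 2);
    RawErasureEncoder encoder = new RSRawEncoder(rs32);
    RawErasureDecoder decoder = new RSRawDecoder(rs32);

    byte[][] data = new byte[3][8];     // fill with real data in practice
    byte[][] parity = new byte[2][8];
    encoder.encode(data, parity);       // parity now holds the RS parity

    // Simulate the loss of data unit 1 and recover it.
    byte[][] inputs = {data[0], null, data[2], parity[0], parity[1]};
    byte[][] recovered = new byte[1][8];
    decoder.decode(inputs, new int[]{1}, recovered);
    // recovered[0] now equals the original data[1]
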
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawErasureCoderFactory.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawErasureCoderFactory.java
new file mode 100644
index 0000000..ece5d9d
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RSRawErasureCoderFactory.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+
+/**
+ * A raw coder factory for the new raw Reed-Solomon coder in Java.
+ */
+@InterfaceAudience.Private
+public class RSRawErasureCoderFactory implements RawErasureCoderFactory {
+
+  public static final String CODER_NAME = "rs_java";
+
+  @Override
+  public RawErasureEncoder createEncoder(
+      ECReplicationConfig ecReplicationConfig) {
+    return new RSRawEncoder(ecReplicationConfig);
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder(
+      ECReplicationConfig ecReplicationConfig) {
+    return new RSRawDecoder(ecReplicationConfig);
+  }
+
+  @Override
+  public String getCoderName() {
+    return CODER_NAME;
+  }
+
+  @Override
+  public String getCodecName() {
+    return ECReplicationConfig.EcCodec.RS.name().toLowerCase();
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderFactory.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderFactory.java
new file mode 100644
index 0000000..27cc049
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderFactory.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+
+/**
+ * Raw erasure coder factory that can be used to create raw encoders and
+ * decoders. It simplifies configuration, since only one factory class needs
+ * to be configured.
+ */
+@InterfaceAudience.Private
+public interface RawErasureCoderFactory {
+
+  /**
+   * Create raw erasure encoder.
+   * @param ecReplicationConfig the config used to create the encoder
+   * @return raw erasure encoder
+   */
+  RawErasureEncoder createEncoder(ECReplicationConfig ecReplicationConfig);
+
+  /**
+   * Create raw erasure decoder.
+   * @param ecReplicationConfig the config used to create the decoder
+   * @return raw erasure decoder
+   */
+  RawErasureDecoder createDecoder(ECReplicationConfig ecReplicationConfig);
+
+  /**
+   * Get the name of the coder.
+   * @return coder name
+   */
+  String getCoderName();
+
+  /**
+   * Get the name of its codec.
+   * @return codec name
+   */
+  String getCodecName();
+}
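
Since the implementations in this patch are registered through the standard
META-INF/services mechanism, the factories can be discovered with
java.util.ServiceLoader; a minimal sketch:

    ServiceLoader<RawErasureCoderFactory> loader =
        ServiceLoader.load(RawErasureCoderFactory.class);
    for (RawErasureCoderFactory factory : loader) {
      // e.g. "rs" -> "rs_java", "xor" -> "xor_java", "dummy" -> "dummy_dummy"
      System.out.println(
          factory.getCodecName() + " -> " + factory.getCoderName());
    }
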
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureDecoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureDecoder.java
new file mode 100644
index 0000000..36ae245
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureDecoder.java
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.ozone.erasurecode.ECChunk;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An abstract raw erasure decoder that's to be inherited by new decoders.
+ *
+ * Raw erasure coder is part of erasure codec framework, where erasure coder is
+ * used to encode/decode a group of blocks (BlockGroup) according to the codec
+ * specific BlockGroup layout and logic. An erasure coder extracts chunks of
+ * data from the blocks and can employ various low level raw erasure coders to
+ * perform encoding/decoding against the chunks.
+ *
+ * To distinguish from erasure coder, here raw erasure coder is used to mean the
+ * low level constructs, since it only takes care of the math calculation with
+ * a group of byte buffers.
+ *
+ * Note it mainly provides decode() calls, which should be stateless and may
+ * be made thread-safe in the future.
+ */
+public abstract class RawErasureDecoder {
+
+  private final ECReplicationConfig coderOptions;
+
+  public RawErasureDecoder(ECReplicationConfig coderOptions) {
+    this.coderOptions = coderOptions;
+  }
+
+  /**
+   * Decode with inputs and erasedIndexes, generating outputs.
+   * How to prepare the inputs:
+   * 1. Create an array containing data units followed by parity units. Please
+   *    note the data units must come before the parity units.
+   * 2. Set null in the array locations specified via erasedIndexes to indicate
+   *    they're erased and no data is to be read from them;
+   * 3. Set null in the array locations for extra redundant items, as they're
+   *    not necessary to read when decoding. For example, in RS-6-3, if only 1
+   *    unit is really erased, then 2 of the remaining units are redundant; they
+   *    can be set to null to indicate that no data will be used from them.
+   *
+   * For example, using RS(6, 3) with sources (d0, d1, d2, d3, d4, d5) and
+   * parities (p0, p1, p2), and d2 erased: we can, and may want to, use only
+   * 6 units like (d1, d3, d4, d5, p0, p2) to recover d2. We will have:
+   *     inputs = [null(d0), d1, null(d2), d3, d4, d5, p0, null(p1), p2]
+   *     erasedIndexes = [2] // index of d2 into inputs array
+   *     outputs = [a-writable-buffer]
+   *
+   * Note, for both inputs and outputs, no mixing of on-heap buffers and direct
+   * buffers is allowed.
+   *
+   * If the coder option ALLOW_CHANGE_INPUTS is set true (false by default), the
+   * content of input buffers may change after the call, subject to concrete
+   * implementation.
+   *
+   * @param inputs input buffers to read data from. The buffers' remaining will
+   *               be 0 after decoding
+   * @param erasedIndexes indexes of erased units in the inputs array
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready for read after the call
+   */
+  public void decode(ByteBuffer[] inputs, int[] erasedIndexes,
+                     ByteBuffer[] outputs) throws IOException {
+    ByteBufferDecodingState decodingState = new ByteBufferDecodingState(this,
+        inputs, erasedIndexes, outputs);
+
+    boolean usingDirectBuffer = decodingState.usingDirectBuffer;
+    int dataLen = decodingState.decodeLength;
+    if (dataLen == 0) {
+      return;
+    }
+
+    int[] inputPositions = new int[inputs.length];
+    for (int i = 0; i < inputPositions.length; i++) {
+      if (inputs[i] != null) {
+        inputPositions[i] = inputs[i].position();
+      }
+    }
+
+    if (usingDirectBuffer) {
+      doDecode(decodingState);
+    } else {
+      ByteArrayDecodingState badState = decodingState.convertToByteArrayState();
+      doDecode(badState);
+    }
+
+    for (int i = 0; i < inputs.length; i++) {
+      if (inputs[i] != null) {
+        // dataLen bytes consumed
+        inputs[i].position(inputPositions[i] + dataLen);
+      }
+    }
+  }
+
+  /**
+   * Perform the real decoding using Direct ByteBuffer.
+   * @param decodingState the decoding state
+   */
+  protected abstract void doDecode(ByteBufferDecodingState decodingState)
+      throws IOException;
+
+  /**
+   * Decode with inputs and erasedIndexes, generating outputs. See above for
+   * more details.
+   *
+   * @param inputs input buffers to read data from
+   * @param erasedIndexes indexes of erased units in the inputs array
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready for read after the call
+   * @throws IOException if the decoder is closed.
+   */
+  public void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs)
+      throws IOException {
+    ByteArrayDecodingState decodingState = new ByteArrayDecodingState(this,
+        inputs, erasedIndexes, outputs);
+
+    if (decodingState.decodeLength == 0) {
+      return;
+    }
+
+    doDecode(decodingState);
+  }
+
+  /**
+   * Perform the real decoding using bytes array, supporting offsets and
+   * lengths.
+   * @param decodingState the decoding state
+   * @throws IOException if the decoder is closed.
+   */
+  protected abstract void doDecode(ByteArrayDecodingState decodingState)
+      throws IOException;
+
+  /**
+   * Decode with inputs and erasedIndexes, generating outputs. See above for
+   * more details.
+   *
+   * Note, for both input and output ECChunks, no mixing of on-heap buffers and
+   * direct buffers are allowed.
+   *
+   * @param inputs input buffers to read data from
+   * @param erasedIndexes indexes of erased units in the inputs array
+   * @param outputs output buffers to put decoded data into according to
+   *                erasedIndexes, ready for read after the call
+   * @throws IOException if the decoder is closed
+   */
+  public void decode(ECChunk[] inputs, int[] erasedIndexes,
+                     ECChunk[] outputs) throws IOException {
+    ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs);
+    ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs);
+    decode(newInputs, erasedIndexes, newOutputs);
+  }
+
+  public int getNumDataUnits() {
+    return coderOptions.getData();
+  }
+
+  public int getNumParityUnits() {
+    return coderOptions.getParity();
+  }
+
+  protected int getNumAllUnits() {
+    return coderOptions.getData() + coderOptions.getParity();
+  }
+
+  /**
+   * Tell if direct buffer is preferred or not. It's for callers to
+   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
+   * byte arrays. It will return false by default.
+   * @return true if native buffer is preferred for performance consideration,
+   * otherwise false.
+   */
+  public boolean preferDirectBuffer() {
+    return false;
+  }
+
+  /**
+   * Whether or not changing the input buffers is allowed while performing
+   * encoding/decoding.
+   * @return true if it's allowed to change inputs, false otherwise
+   */
+  public boolean allowChangeInputs() {
+    return false;
+  }
+
+  /**
+   * Whether dumping verbose info during encoding/decoding is allowed.
+   * @return true if it's allowed to do verbose dump, false otherwise.
+   */
+  public boolean allowVerboseDump() {
+    return false;
+  }
+
+  /**
+   * Should be called when releasing this coder. A good chance to release
+   * encoding or decoding buffers.
+   */
+  public void release() {
+    // Nothing to do here.
+  }
+}
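
Following the RS(6, 3) walkthrough in the decode() Javadoc above, a sketch of
the caller-side preparation (decoder, the d/p buffers and recoveredD2 are
assumed to be pre-allocated, equal-length, and all on-heap or all direct):

    // Index layout: [d0..d5, p0, p1, p2]. d2 is erased; d0 and p1 are
    // deliberately passed as null because 6 live units suffice for recovery.
    ByteBuffer[] inputs = {null, d1, null, d3, d4, d5, p0, null, p2};
    int[] erasedIndexes = {2};             // index of d2 in the inputs array
    ByteBuffer[] outputs = {recoveredD2};  // one writable buffer per erasure
    decoder.decode(inputs, erasedIndexes, outputs);
    // Afterwards each live input's position has advanced by the decode
    // length, and recoveredD2 is ready for reading.
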
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureEncoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureEncoder.java
new file mode 100644
index 0000000..2d3e704
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/RawErasureEncoder.java
@@ -0,0 +1,193 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.ozone.erasurecode.ECChunk;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * An abstract raw erasure encoder that's to be inherited by new encoders.
+ *
+ * Raw erasure coder is part of erasure codec framework, where erasure coder is
+ * used to encode/decode a group of blocks (BlockGroup) according to the codec
+ * specific BlockGroup layout and logic. An erasure coder extracts chunks of
+ * data from the blocks and can employ various low level raw erasure coders to
+ * perform encoding/decoding against the chunks.
+ *
+ * To distinguish from erasure coder, here raw erasure coder is used to mean the
+ * low level constructs, since it only takes care of the math calculation with
+ * a group of byte buffers.
+ *
+ * Note it mainly provides encode() calls, which should be stateless and may
+ * be made thread-safe in the future.
+ */
+public abstract class RawErasureEncoder {
+
+  private final ECReplicationConfig coderOptions;
+
+  public RawErasureEncoder(ECReplicationConfig coderOptions) {
+    this.coderOptions = coderOptions;
+  }
+
+  /**
+   * Encode with inputs, generating outputs.
+   *
+   * Note, for both inputs and outputs, no mixing of on-heap buffers and direct
+   * buffers is allowed.
+   *
+   * If the coder option ALLOW_CHANGE_INPUTS is set true (false by default), the
+   * content of input buffers may change after the call, subject to concrete
+   * implementation. In any case, the positions of the input buffers will move
+   * forward.
+   *
+   * @param inputs input buffers to read data from. The buffers' remaining will
+   *               be 0 after encoding
+   * @param outputs output buffers to put the encoded data into, ready to read
+   *                after the call
+   * @throws IOException if the encoder is closed.
+   */
+  public void encode(ByteBuffer[] inputs, ByteBuffer[] outputs)
+      throws IOException {
+    ByteBufferEncodingState bbeState = new ByteBufferEncodingState(
+        this, inputs, outputs);
+
+    boolean usingDirectBuffer = bbeState.usingDirectBuffer;
+    int dataLen = bbeState.encodeLength;
+    if (dataLen == 0) {
+      return;
+    }
+
+    int[] inputPositions = new int[inputs.length];
+    for (int i = 0; i < inputPositions.length; i++) {
+      if (inputs[i] != null) {
+        inputPositions[i] = inputs[i].position();
+      }
+    }
+
+    if (usingDirectBuffer) {
+      doEncode(bbeState);
+    } else {
+      ByteArrayEncodingState baeState = bbeState.convertToByteArrayState();
+      doEncode(baeState);
+    }
+
+    for (int i = 0; i < inputs.length; i++) {
+      if (inputs[i] != null) {
+        // dataLen bytes consumed
+        inputs[i].position(inputPositions[i] + dataLen);
+      }
+    }
+  }
+
+  /**
+   * Perform the real encoding work using direct ByteBuffer.
+   * @param encodingState the encoding state
+   */
+  protected abstract void doEncode(ByteBufferEncodingState encodingState)
+      throws IOException;
+
+  /**
+   * Encode with inputs, generating outputs. See above for more details.
+   *
+   * @param inputs input buffers to read data from
+   * @param outputs output buffers to put the encoded data into, ready to read
+   *                after the call
+   */
+  public void encode(byte[][] inputs, byte[][] outputs) throws IOException {
+    ByteArrayEncodingState baeState = new ByteArrayEncodingState(
+        this, inputs, outputs);
+
+    int dataLen = baeState.encodeLength;
+    if (dataLen == 0) {
+      return;
+    }
+
+    doEncode(baeState);
+  }
+
+  /**
+   * Perform the real encoding work using bytes array, supporting offsets
+   * and lengths.
+   * @param encodingState the encoding state
+   */
+  protected abstract void doEncode(ByteArrayEncodingState encodingState)
+      throws IOException;
+
+  /**
+   * Encode with inputs, generating outputs. See above for more details.
+   *
+   * @param inputs input buffers to read data from
+   * @param outputs output buffers to put the encoded data into, ready to read
+   *                after the call
+   * @throws IOException if the encoder is closed.
+   */
+  public void encode(ECChunk[] inputs, ECChunk[] outputs) throws IOException {
+    ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
+    ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
+    encode(newInputs, newOutputs);
+  }
+
+  public int getNumDataUnits() {
+    return coderOptions.getData();
+  }
+
+  public int getNumParityUnits() {
+    return coderOptions.getParity();
+  }
+
+  public int getNumAllUnits() {
+    return coderOptions.getData() + coderOptions.getParity();
+  }
+
+  /**
+   * Tell if direct buffer is preferred or not. It's for callers to
+   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
+   * byte arrays. It will return false by default.
+   * @return true if native buffer is preferred for performance consideration,
+   * otherwise false.
+   */
+  public boolean preferDirectBuffer() {
+    return false;
+  }
+
+  /**
+   * Whether or not changing the input buffers is allowed while performing
+   * encoding/decoding.
+   * @return true if it's allowed to change inputs, false otherwise
+   */
+  public boolean allowChangeInputs() {
+    return false;
+  }
+
+  /**
+   * Whether dumping verbose info during encoding/decoding is allowed.
+   * @return true if it's allowed to do verbose dump, false otherwise.
+   */
+  public boolean allowVerboseDump() {
+    return false;
+  }
+
+  /**
+   * Should be called when releasing this coder. A good chance to release
+   * encoding or decoding buffers.
+   */
+  public void release() {
+    // Nothing to do here.
+  }
+}
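
A sketch of the ByteBuffer contract described above (the encoder variable and
buffer sizes are illustrative):

    ByteBuffer[] in = new ByteBuffer[encoder.getNumDataUnits()];
    ByteBuffer[] out = new ByteBuffer[encoder.getNumParityUnits()];
    for (int i = 0; i < in.length; i++) {
      in[i] = ByteBuffer.allocate(4096);   // put() data, then flip()
    }
    for (int i = 0; i < out.length; i++) {
      out[i] = ByteBuffer.allocate(4096);
    }
    encoder.encode(in, out);
    // Each in[i] has been consumed (position advanced by the encode length,
    // so remaining == 0); each out[i] is positioned ready for reading.
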
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawDecoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawDecoder.java
new file mode 100644
index 0000000..1829b17
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawDecoder.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw decoder in the XOR code scheme, in pure Java, adapted from HDFS-RAID.
+ *
+ * XOR code is an important primitive code scheme in erasure coding and is
+ * often used in advanced codes, like HitchHiker and LRC, though it is rarely
+ * deployed independently.
+ */
+@InterfaceAudience.Private
+public class XORRawDecoder extends RawErasureDecoder {
+
+  public XORRawDecoder(ECReplicationConfig ecReplicationConfig) {
+    super(ecReplicationConfig);
+  }
+
+  @Override
+  protected void doDecode(ByteBufferDecodingState decodingState) {
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.decodeLength);
+    ByteBuffer output = decodingState.outputs[0];
+
+    int erasedIdx = decodingState.erasedIndexes[0];
+
+    // Process the inputs.
+    int iIdx, oIdx;
+    for (int i = 0; i < decodingState.inputs.length; i++) {
+      // Skip the erased location.
+      if (i == erasedIdx) {
+        continue;
+      }
+
+      for (iIdx = decodingState.inputs[i].position(), oIdx = output.position();
+           iIdx < decodingState.inputs[i].limit();
+           iIdx++, oIdx++) {
+        output.put(oIdx, (byte) (output.get(oIdx) ^
+            decodingState.inputs[i].get(iIdx)));
+      }
+    }
+  }
+
+  @Override
+  protected void doDecode(ByteArrayDecodingState decodingState) {
+    byte[] output = decodingState.outputs[0];
+    int dataLen = decodingState.decodeLength;
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.outputOffsets, dataLen);
+    int erasedIdx = decodingState.erasedIndexes[0];
+
+    // Process the inputs.
+    int iIdx, oIdx;
+    for (int i = 0; i < decodingState.inputs.length; i++) {
+      // Skip the erased location.
+      if (i == erasedIdx) {
+        continue;
+      }
+
+      for (iIdx = decodingState.inputOffsets[i],
+               oIdx = decodingState.outputOffsets[0];
+           iIdx < decodingState.inputOffsets[i] + dataLen; iIdx++, oIdx++) {
+        output[oIdx] ^= decodingState.inputs[i][iIdx];
+      }
+    }
+  }
+
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawEncoder.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawEncoder.java
new file mode 100644
index 0000000..c02e97c
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawEncoder.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw encoder in the XOR code scheme, in pure Java, adapted from HDFS-RAID.
+ *
+ * XOR code is an important primitive code scheme in erasure coding and is
+ * often used in advanced codes, like HitchHiker and LRC, though it is rarely
+ * deployed independently.
+ */
+@InterfaceAudience.Private
+public class XORRawEncoder extends RawErasureEncoder {
+
+  public XORRawEncoder(ECReplicationConfig ecReplicationConfig) {
+    super(ecReplicationConfig);
+  }
+
+  @Override
+  protected void doEncode(ByteBufferEncodingState encodingState) {
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.encodeLength);
+    ByteBuffer output = encodingState.outputs[0];
+
+    // Get the first buffer's data.
+    int iIdx, oIdx;
+    for (iIdx = encodingState.inputs[0].position(), oIdx = output.position();
+         iIdx < encodingState.inputs[0].limit(); iIdx++, oIdx++) {
+      output.put(oIdx, encodingState.inputs[0].get(iIdx));
+    }
+
+    // XOR with everything else.
+    for (int i = 1; i < encodingState.inputs.length; i++) {
+      for (iIdx = encodingState.inputs[i].position(), oIdx = output.position();
+           iIdx < encodingState.inputs[i].limit();
+           iIdx++, oIdx++) {
+        output.put(oIdx, (byte) (output.get(oIdx) ^
+            encodingState.inputs[i].get(iIdx)));
+      }
+    }
+  }
+
+  @Override
+  protected void doEncode(ByteArrayEncodingState encodingState) {
+    int dataLen = encodingState.encodeLength;
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.outputOffsets, dataLen);
+    byte[] output = encodingState.outputs[0];
+
+    // Get the first buffer's data.
+    int iIdx, oIdx;
+    for (iIdx = encodingState.inputOffsets[0],
+             oIdx = encodingState.outputOffsets[0];
+         iIdx < encodingState.inputOffsets[0] + dataLen; iIdx++, oIdx++) {
+      output[oIdx] = encodingState.inputs[0][iIdx];
+    }
+
+    // XOR with everything else.
+    for (int i = 1; i < encodingState.inputs.length; i++) {
+      for (iIdx = encodingState.inputOffsets[i],
+               oIdx = encodingState.outputOffsets[0];
+           iIdx < encodingState.inputOffsets[i] + dataLen; iIdx++, oIdx++) {
+        output[oIdx] ^= encodingState.inputs[i][iIdx];
+      }
+    }
+  }
+}
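
XOR produces a single parity unit, so exactly one erasure can be repaired by
XOR-ing the survivors; a minimal sketch, again assuming a two-argument
ECReplicationConfig constructor and that encode()/decode() may throw
IOException:

    ECReplicationConfig xor31 = new ECReplicationConfig(3, 1);
    RawErasureEncoder encoder = new XORRawEncoder(xor31);
    RawErasureDecoder decoder = new XORRawDecoder(xor31);

    byte[][] data = new byte[3][16];
    byte[][] parity = new byte[1][16];
    encoder.encode(data, parity);          // parity[0] = d0 ^ d1 ^ d2

    byte[][] inputs = {data[0], null, data[2], parity[0]};
    byte[][] out = new byte[1][16];
    decoder.decode(inputs, new int[]{1}, out);
    // out[0] equals the original data[1], since d1 = d0 ^ d2 ^ parity
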
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawErasureCoderFactory.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawErasureCoderFactory.java
new file mode 100644
index 0000000..631af5b
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/XORRawErasureCoderFactory.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+
+/**
+ * A raw coder factory for raw XOR coder.
+ */
+@InterfaceAudience.Private
+public class XORRawErasureCoderFactory implements RawErasureCoderFactory {
+
+  public static final String CODER_NAME = "xor_java";
+
+  @Override
+  public RawErasureEncoder createEncoder(
+      ECReplicationConfig replicationConfig) {
+    return new XORRawEncoder(replicationConfig);
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder(
+      ECReplicationConfig replicationConfig) {
+    return new XORRawDecoder(replicationConfig);
+  }
+
+  @Override
+  public String getCoderName() {
+    return CODER_NAME;
+  }
+
+  @Override
+  public String getCodecName() {
+    return ECReplicationConfig.EcCodec.XOR.name().toLowerCase();
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/package-info.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/package-info.java
new file mode 100644
index 0000000..cfad4a3
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/package-info.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *
+ * Raw erasure coders.
+ *
+ * Raw erasure coder is part of erasure codec framework, where erasure coder is
+ * used to encode/decode a group of blocks (BlockGroup) according to the codec
+ * specific BlockGroup layout and logic. An erasure coder extracts chunks of
+ * data from the blocks and can employ various low level raw erasure coders to
+ * perform encoding/decoding against the chunks.
+ *
+ * To distinguish from erasure coder, here raw erasure coder is used to mean the
+ * low level constructs, since it only takes care of the math calculation with
+ * a group of byte buffers.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/DumpUtil.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/DumpUtil.java
new file mode 100644
index 0000000..54e9d76
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/DumpUtil.java
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.ozone.erasurecode.ECChunk;
+
+/**
+ * A dump utility class for debugging data erasure coding/decoding issues.
+ * It is not meant to be used in production code.
+ */
+@InterfaceAudience.Private
+public final class DumpUtil {
+  private static final String HEX_CHARS_STR = "0123456789ABCDEF";
+  private static final char[] HEX_CHARS = HEX_CHARS_STR.toCharArray();
+
+  private DumpUtil() {
+    // Not called.
+  }
+
+  /**
+   * Convert bytes into format like 0x02 02 00 80.
+   * If limit is negative or too large, then all bytes will be converted.
+   */
+  public static String bytesToHex(byte[] bytes, int limit) {
+    if (limit <= 0 || limit > bytes.length) {
+      limit = bytes.length;
+    }
+    int len = limit * 2;
+    len += limit; // for the ' ' appended after each byte
+    len += 2; // for '0x' prefix
+    char[] hexChars = new char[len];
+    hexChars[0] = '0';
+    hexChars[1] = 'x';
+    for (int j = 0; j < limit; j++) {
+      int v = bytes[j] & 0xFF;
+      hexChars[j * 3 + 2] = HEX_CHARS[v >>> 4];
+      hexChars[j * 3 + 3] = HEX_CHARS[v & 0x0F];
+      hexChars[j * 3 + 4] = ' ';
+    }
+
+    return new String(hexChars);
+  }
+
+  public static void dumpMatrix(byte[] matrix,
+                                int numDataUnits, int numAllUnits) {
+    for (int i = 0; i < numDataUnits; i++) {
+      for (int j = 0; j < numAllUnits; j++) {
+        System.out.print(" ");
+        System.out.print(0xff & matrix[i * numAllUnits + j]);
+      }
+      System.out.println();
+    }
+  }
+
+  /**
+   * Print data in hex format in an array of chunks.
+   * @param header a header line printed before the chunks
+   * @param chunks the chunks to dump
+   */
+  public static void dumpChunks(String header, ECChunk[] chunks) {
+    System.out.println();
+    System.out.println(header);
+    for (int i = 0; i < chunks.length; i++) {
+      dumpChunk(chunks[i]);
+    }
+    System.out.println();
+  }
+
+  /**
+   * Print data in hex format in a chunk.
+   * @param chunk the chunk to dump
+   */
+  public static void dumpChunk(ECChunk chunk) {
+    String str;
+    if (chunk == null) {
+      str = "<EMPTY>";
+    } else {
+      byte[] bytes = chunk.toBytesArray();
+      str = DumpUtil.bytesToHex(bytes, 16);
+    }
+    System.out.println(str);
+  }
+}
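
For example, the format documented on bytesToHex above looks like this (note
that a trailing space follows the last byte):

    byte[] bytes = {0x02, 0x02, 0x00, (byte) 0x80};
    System.out.println(DumpUtil.bytesToHex(bytes, -1));
    // prints: 0x02 02 00 80
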
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
new file mode 100644
index 0000000..4e0b2d2
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GF256.java
@@ -0,0 +1,333 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder.util;
+
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A GaloisField utility class caring only about the GF(256) field, for
+ * efficiency. Some of the code is borrowed from the ISA-L implementation
+ * (C or ASM code).
+ */
+@InterfaceAudience.Private
+public final class GF256 {
+
+  private GF256() { }
+
+  private static final byte[] GF_BASE = new byte[]{
+      (byte) 0x01, (byte) 0x02, (byte) 0x04, (byte) 0x08, (byte) 0x10,
+      (byte) 0x20, (byte) 0x40, (byte) 0x80, (byte) 0x1d, (byte) 0x3a,
+      (byte) 0x74, (byte) 0xe8, (byte) 0xcd, (byte) 0x87, (byte) 0x13,
+      (byte) 0x26, (byte) 0x4c, (byte) 0x98, (byte) 0x2d, (byte) 0x5a,
+      (byte) 0xb4, (byte) 0x75, (byte) 0xea, (byte) 0xc9, (byte) 0x8f,
+      (byte) 0x03, (byte) 0x06, (byte) 0x0c, (byte) 0x18, (byte) 0x30,
+      (byte) 0x60, (byte) 0xc0, (byte) 0x9d, (byte) 0x27, (byte) 0x4e,
+      (byte) 0x9c, (byte) 0x25, (byte) 0x4a, (byte) 0x94, (byte) 0x35,
+      (byte) 0x6a, (byte) 0xd4, (byte) 0xb5, (byte) 0x77, (byte) 0xee,
+      (byte) 0xc1, (byte) 0x9f, (byte) 0x23, (byte) 0x46, (byte) 0x8c,
+      (byte) 0x05, (byte) 0x0a, (byte) 0x14, (byte) 0x28, (byte) 0x50,
+      (byte) 0xa0, (byte) 0x5d, (byte) 0xba, (byte) 0x69, (byte) 0xd2,
+      (byte) 0xb9, (byte) 0x6f, (byte) 0xde, (byte) 0xa1, (byte) 0x5f,
+      (byte) 0xbe, (byte) 0x61, (byte) 0xc2, (byte) 0x99, (byte) 0x2f,
+      (byte) 0x5e, (byte) 0xbc, (byte) 0x65, (byte) 0xca, (byte) 0x89,
+      (byte) 0x0f, (byte) 0x1e, (byte) 0x3c, (byte) 0x78, (byte) 0xf0,
+      (byte) 0xfd, (byte) 0xe7, (byte) 0xd3, (byte) 0xbb, (byte) 0x6b,
+      (byte) 0xd6, (byte) 0xb1, (byte) 0x7f, (byte) 0xfe, (byte) 0xe1,
+      (byte) 0xdf, (byte) 0xa3, (byte) 0x5b, (byte) 0xb6, (byte) 0x71,
+      (byte) 0xe2, (byte) 0xd9, (byte) 0xaf, (byte) 0x43, (byte) 0x86,
+      (byte) 0x11, (byte) 0x22, (byte) 0x44, (byte) 0x88, (byte) 0x0d,
+      (byte) 0x1a, (byte) 0x34, (byte) 0x68, (byte) 0xd0, (byte) 0xbd,
+      (byte) 0x67, (byte) 0xce, (byte) 0x81, (byte) 0x1f, (byte) 0x3e,
+      (byte) 0x7c, (byte) 0xf8, (byte) 0xed, (byte) 0xc7, (byte) 0x93,
+      (byte) 0x3b, (byte) 0x76, (byte) 0xec, (byte) 0xc5, (byte) 0x97,
+      (byte) 0x33, (byte) 0x66, (byte) 0xcc, (byte) 0x85, (byte) 0x17,
+      (byte) 0x2e, (byte) 0x5c, (byte) 0xb8, (byte) 0x6d, (byte) 0xda,
+      (byte) 0xa9, (byte) 0x4f, (byte) 0x9e, (byte) 0x21, (byte) 0x42,
+      (byte) 0x84, (byte) 0x15, (byte) 0x2a, (byte) 0x54, (byte) 0xa8,
+      (byte) 0x4d, (byte) 0x9a, (byte) 0x29, (byte) 0x52, (byte) 0xa4,
+      (byte) 0x55, (byte) 0xaa, (byte) 0x49, (byte) 0x92, (byte) 0x39,
+      (byte) 0x72, (byte) 0xe4, (byte) 0xd5, (byte) 0xb7, (byte) 0x73,
+      (byte) 0xe6, (byte) 0xd1, (byte) 0xbf, (byte) 0x63, (byte) 0xc6,
+      (byte) 0x91, (byte) 0x3f, (byte) 0x7e, (byte) 0xfc, (byte) 0xe5,
+      (byte) 0xd7, (byte) 0xb3, (byte) 0x7b, (byte) 0xf6, (byte) 0xf1,
+      (byte) 0xff, (byte) 0xe3, (byte) 0xdb, (byte) 0xab, (byte) 0x4b,
+      (byte) 0x96, (byte) 0x31, (byte) 0x62, (byte) 0xc4, (byte) 0x95,
+      (byte) 0x37, (byte) 0x6e, (byte) 0xdc, (byte) 0xa5, (byte) 0x57,
+      (byte) 0xae, (byte) 0x41, (byte) 0x82, (byte) 0x19, (byte) 0x32,
+      (byte) 0x64, (byte) 0xc8, (byte) 0x8d, (byte) 0x07, (byte) 0x0e,
+      (byte) 0x1c, (byte) 0x38, (byte) 0x70, (byte) 0xe0, (byte) 0xdd,
+      (byte) 0xa7, (byte) 0x53, (byte) 0xa6, (byte) 0x51, (byte) 0xa2,
+      (byte) 0x59, (byte) 0xb2, (byte) 0x79, (byte) 0xf2, (byte) 0xf9,
+      (byte) 0xef, (byte) 0xc3, (byte) 0x9b, (byte) 0x2b, (byte) 0x56,
+      (byte) 0xac, (byte) 0x45, (byte) 0x8a, (byte) 0x09, (byte) 0x12,
+      (byte) 0x24, (byte) 0x48, (byte) 0x90, (byte) 0x3d, (byte) 0x7a,
+      (byte) 0xf4, (byte) 0xf5, (byte) 0xf7, (byte) 0xf3, (byte) 0xfb,
+      (byte) 0xeb, (byte) 0xcb, (byte) 0x8b, (byte) 0x0b, (byte) 0x16,
+      (byte) 0x2c, (byte) 0x58, (byte) 0xb0, (byte) 0x7d, (byte) 0xfa,
+      (byte) 0xe9, (byte) 0xcf, (byte) 0x83, (byte) 0x1b, (byte) 0x36,
+      (byte) 0x6c, (byte) 0xd8, (byte) 0xad, (byte) 0x47, (byte) 0x8e,
+      (byte) 0x01
+  };
+
+  private static final byte[] GF_LOG_BASE = new byte[]{
+      (byte) 0x00, (byte) 0xff, (byte) 0x01, (byte) 0x19, (byte) 0x02,
+      (byte) 0x32, (byte) 0x1a, (byte) 0xc6, (byte) 0x03, (byte) 0xdf,
+      (byte) 0x33, (byte) 0xee, (byte) 0x1b, (byte) 0x68, (byte) 0xc7,
+      (byte) 0x4b, (byte) 0x04, (byte) 0x64, (byte) 0xe0, (byte) 0x0e,
+      (byte) 0x34, (byte) 0x8d, (byte) 0xef, (byte) 0x81, (byte) 0x1c,
+      (byte) 0xc1, (byte) 0x69, (byte) 0xf8, (byte) 0xc8, (byte) 0x08,
+      (byte) 0x4c, (byte) 0x71, (byte) 0x05, (byte) 0x8a, (byte) 0x65,
+      (byte) 0x2f, (byte) 0xe1, (byte) 0x24, (byte) 0x0f, (byte) 0x21,
+      (byte) 0x35, (byte) 0x93, (byte) 0x8e, (byte) 0xda, (byte) 0xf0,
+      (byte) 0x12, (byte) 0x82, (byte) 0x45, (byte) 0x1d, (byte) 0xb5,
+      (byte) 0xc2, (byte) 0x7d, (byte) 0x6a, (byte) 0x27, (byte) 0xf9,
+      (byte) 0xb9, (byte) 0xc9, (byte) 0x9a, (byte) 0x09, (byte) 0x78,
+      (byte) 0x4d, (byte) 0xe4, (byte) 0x72, (byte) 0xa6, (byte) 0x06,
+      (byte) 0xbf, (byte) 0x8b, (byte) 0x62, (byte) 0x66, (byte) 0xdd,
+      (byte) 0x30, (byte) 0xfd, (byte) 0xe2, (byte) 0x98, (byte) 0x25,
+      (byte) 0xb3, (byte) 0x10, (byte) 0x91, (byte) 0x22, (byte) 0x88,
+      (byte) 0x36, (byte) 0xd0, (byte) 0x94, (byte) 0xce, (byte) 0x8f,
+      (byte) 0x96, (byte) 0xdb, (byte) 0xbd, (byte) 0xf1, (byte) 0xd2,
+      (byte) 0x13, (byte) 0x5c, (byte) 0x83, (byte) 0x38, (byte) 0x46,
+      (byte) 0x40, (byte) 0x1e, (byte) 0x42, (byte) 0xb6, (byte) 0xa3,
+      (byte) 0xc3, (byte) 0x48, (byte) 0x7e, (byte) 0x6e, (byte) 0x6b,
+      (byte) 0x3a, (byte) 0x28, (byte) 0x54, (byte) 0xfa, (byte) 0x85,
+      (byte) 0xba, (byte) 0x3d, (byte) 0xca, (byte) 0x5e, (byte) 0x9b,
+      (byte) 0x9f, (byte) 0x0a, (byte) 0x15, (byte) 0x79, (byte) 0x2b,
+      (byte) 0x4e, (byte) 0xd4, (byte) 0xe5, (byte) 0xac, (byte) 0x73,
+      (byte) 0xf3, (byte) 0xa7, (byte) 0x57, (byte) 0x07, (byte) 0x70,
+      (byte) 0xc0, (byte) 0xf7, (byte) 0x8c, (byte) 0x80, (byte) 0x63,
+      (byte) 0x0d, (byte) 0x67, (byte) 0x4a, (byte) 0xde, (byte) 0xed,
+      (byte) 0x31, (byte) 0xc5, (byte) 0xfe, (byte) 0x18, (byte) 0xe3,
+      (byte) 0xa5, (byte) 0x99, (byte) 0x77, (byte) 0x26, (byte) 0xb8,
+      (byte) 0xb4, (byte) 0x7c, (byte) 0x11, (byte) 0x44, (byte) 0x92,
+      (byte) 0xd9, (byte) 0x23, (byte) 0x20, (byte) 0x89, (byte) 0x2e,
+      (byte) 0x37, (byte) 0x3f, (byte) 0xd1, (byte) 0x5b, (byte) 0x95,
+      (byte) 0xbc, (byte) 0xcf, (byte) 0xcd, (byte) 0x90, (byte) 0x87,
+      (byte) 0x97, (byte) 0xb2, (byte) 0xdc, (byte) 0xfc, (byte) 0xbe,
+      (byte) 0x61, (byte) 0xf2, (byte) 0x56, (byte) 0xd3, (byte) 0xab,
+      (byte) 0x14, (byte) 0x2a, (byte) 0x5d, (byte) 0x9e, (byte) 0x84,
+      (byte) 0x3c, (byte) 0x39, (byte) 0x53, (byte) 0x47, (byte) 0x6d,
+      (byte) 0x41, (byte) 0xa2, (byte) 0x1f, (byte) 0x2d, (byte) 0x43,
+      (byte) 0xd8, (byte) 0xb7, (byte) 0x7b, (byte) 0xa4, (byte) 0x76,
+      (byte) 0xc4, (byte) 0x17, (byte) 0x49, (byte) 0xec, (byte) 0x7f,
+      (byte) 0x0c, (byte) 0x6f, (byte) 0xf6, (byte) 0x6c, (byte) 0xa1,
+      (byte) 0x3b, (byte) 0x52, (byte) 0x29, (byte) 0x9d, (byte) 0x55,
+      (byte) 0xaa, (byte) 0xfb, (byte) 0x60, (byte) 0x86, (byte) 0xb1,
+      (byte) 0xbb, (byte) 0xcc, (byte) 0x3e, (byte) 0x5a, (byte) 0xcb,
+      (byte) 0x59, (byte) 0x5f, (byte) 0xb0, (byte) 0x9c, (byte) 0xa9,
+      (byte) 0xa0, (byte) 0x51, (byte) 0x0b, (byte) 0xf5, (byte) 0x16,
+      (byte) 0xeb, (byte) 0x7a, (byte) 0x75, (byte) 0x2c, (byte) 0xd7,
+      (byte) 0x4f, (byte) 0xae, (byte) 0xd5, (byte) 0xe9, (byte) 0xe6,
+      (byte) 0xe7, (byte) 0xad, (byte) 0xe8, (byte) 0x74, (byte) 0xd6,
+      (byte) 0xf4, (byte) 0xea, (byte) 0xa8, (byte) 0x50, (byte) 0x58,
+      (byte) 0xaf
+  };
+
+  @SuppressFBWarnings("MS_EXPOSE_REP")
+  private static byte[][] theGfMulTab; // multiply result table in GF 256 space
+
+  /**
+   * Initialize the GF multiply table for performance. Just compute once, and
+   * avoid repeatedly doing the multiply during encoding/decoding.
+   */
+  static {
+    theGfMulTab = new byte[256][256];
+    for (int i = 0; i < 256; i++) {
+      for (int j = 0; j < 256; j++) {
+        theGfMulTab[i][j] = gfMul((byte) i, (byte) j);
+      }
+    }
+  }
+
+  /**
+   * Get the big GF multiply table so it can be utilized efficiently.
+   * @return the big GF multiply table
+   */
+  public static byte[][] gfMulTab() {
+    return theGfMulTab;
+  }
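+
+  // A minimal usage sketch (illustrative, not part of the original sources):
+  // the table is indexed by the unsigned values of the two operands, so
+  //   byte product = GF256.gfMulTab()[a & 0xff][b & 0xff];
+  // yields the same result as gfMul(a, b) without the log/antilog lookups.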
+
+  public static byte gfMul(byte a, byte b) {
+    if ((a == 0) || (b == 0)) {
+      return 0;
+    }
+
+    int tmp = (GF_LOG_BASE[a & 0xff] & 0xff) +
+        (GF_LOG_BASE[b & 0xff] & 0xff);
+    if (tmp > 254) {
+      tmp -= 255;
+    }
+
+    return GF_BASE[tmp];
+  }
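+
+  // Worked example with the tables above: for a = b = 2, GF_LOG_BASE[2] = 1,
+  // so tmp = 1 + 1 = 2 and GF_BASE[2] = 4; hence gfMul(2, 2) == 4 in GF(256).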
+
+  public static byte gfInv(byte a) {
+    if (a == 0) {
+      return 0;
+    }
+
+    return GF_BASE[255 - (GF_LOG_BASE[a & 0xff] & 0xff)];
+  }
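+
+  // For instance, gfInv((byte) 2) is GF_BASE[255 - 1] = (byte) 0x8e, and
+  // gfMul((byte) 2, (byte) 0x8e) == 1 confirms it is the inverse.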
+
+  /**
+   * Invert a matrix assuming it's invertible.
+   *
+   * Ported from Intel ISA-L library.
+   */
+  public static void gfInvertMatrix(byte[] inMatrix, byte[] outMatrix, int n) {
+    byte temp;
+
+    // Set outMatrix[] to the identity matrix
+    for (int i = 0; i < n * n; i++) {
+      // memset(outMatrix, 0, n*n)
+      outMatrix[i] = 0;
+    }
+
+    for (int i = 0; i < n; i++) {
+      outMatrix[i * n + i] = 1;
+    }
+
+    // Inverse
+    for (int j, i = 0; i < n; i++) {
+      // Check for 0 in pivot element
+      if (inMatrix[i * n + i] == 0) {
+        // Find a row with non-zero in current column and swap
+        for (j = i + 1; j < n; j++) {
+          if (inMatrix[j * n + i] != 0) {
+            break;
+          }
+        }
+        if (j == n) {
+          // Couldn't find means it's singular
+          throw new RuntimeException("Not invertible");
+        }
+
+        for (int k = 0; k < n; k++) {
+          // Swap rows i,j
+          temp = inMatrix[i * n + k];
+          inMatrix[i * n + k] = inMatrix[j * n + k];
+          inMatrix[j * n + k] = temp;
+
+          temp = outMatrix[i * n + k];
+          outMatrix[i * n + k] = outMatrix[j * n + k];
+          outMatrix[j * n + k] = temp;
+        }
+      }
+
+      temp = gfInv(inMatrix[i * n + i]); // 1/pivot
+      for (j = 0; j < n; j++) {
+        // Scale row i by 1/pivot
+        inMatrix[i * n + j] = gfMul(inMatrix[i * n + j], temp);
+        outMatrix[i * n + j] = gfMul(outMatrix[i * n + j], temp);
+      }
+
+      for (j = 0; j < n; j++) {
+        if (j == i) {
+          continue;
+        }
+
+        temp = inMatrix[j * n + i];
+        for (int k = 0; k < n; k++) {
+          outMatrix[j * n + k] ^= gfMul(temp, outMatrix[i * n + k]);
+          inMatrix[j * n + k] ^= gfMul(temp, inMatrix[i * n + k]);
+        }
+      }
+    }
+  }
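+
+  // Illustrative check (hedged, not from the original sources): over GF(2^8)
+  // the 2x2 matrix {1, 1, 0, 1} is its own inverse, so
+  //   byte[] out = new byte[4];
+  //   gfInvertMatrix(new byte[]{1, 1, 0, 1}, out, 2);
+  // leaves out equal to {1, 1, 0, 1}. Note that inMatrix is reduced to the
+  // identity as a side effect.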
+
+  /**
+   * Ported from Intel ISA-L library.
+   *
+   * Calculates const table gftbl in GF(2^8) from single input A
+   * gftbl(A) = {A{00}, A{01}, A{02}, ... , A{0f} }, {A{00}, A{10}, A{20},
+   * ... , A{f0} } -- from ISA-L implementation
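+   *
+   * Assuming this layout, entries 0-15 at {@code offset} hold c multiplied by
+   * each low-nibble value, and entries 16-31 hold c multiplied by each
+   * high-nibble value (i << 4), so a single product can be recovered as
+   * {@code tbl[offset + (b & 0x0f)] ^ tbl[offset + 16 + ((b >>> 4) & 0x0f)]}.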
+   */
+  public static void gfVectMulInit(byte c, byte[] tbl, int offset) {
+    byte c2 = (byte) ((c << 1) ^ ((c & 0x80) != 0 ? 0x1d : 0));
+    byte c4 = (byte) ((c2 << 1) ^ ((c2 & 0x80) != 0 ? 0x1d : 0));
+    byte c8 = (byte) ((c4 << 1) ^ ((c4 & 0x80) != 0 ? 0x1d : 0));
+
+    byte c3, c5, c6, c7, c9, c10, c11, c12, c13, c14, c15;
+    byte c17, c18, c19, c20, c21, c22, c23, c24, c25, c26,
+        c27, c28, c29, c30, c31;
+
+    c3 = (byte) (c2 ^ c);
+    c5 = (byte) (c4 ^ c);
+    c6 = (byte) (c4 ^ c2);
+    c7 = (byte) (c4 ^ c3);
+
+    c9 = (byte) (c8 ^ c);
+    c10 = (byte) (c8 ^ c2);
+    c11 = (byte) (c8 ^ c3);
+    c12 = (byte) (c8 ^ c4);
+    c13 = (byte) (c8 ^ c5);
+    c14 = (byte) (c8 ^ c6);
+    c15 = (byte) (c8 ^ c7);
+
+    tbl[offset + 0] = 0;
+    tbl[offset + 1] = c;
+    tbl[offset + 2] = c2;
+    tbl[offset + 3] = c3;
+    tbl[offset + 4] = c4;
+    tbl[offset + 5] = c5;
+    tbl[offset + 6] = c6;
+    tbl[offset + 7] = c7;
+    tbl[offset + 8] = c8;
+    tbl[offset + 9] = c9;
+    tbl[offset + 10] = c10;
+    tbl[offset + 11] = c11;
+    tbl[offset + 12] = c12;
+    tbl[offset + 13] = c13;
+    tbl[offset + 14] = c14;
+    tbl[offset + 15] = c15;
+
+    c17 = (byte) ((c8 << 1) ^ ((c8 & 0x80) != 0 ? 0x1d : 0));
+    c18 = (byte) ((c17 << 1) ^ ((c17 & 0x80) != 0 ? 0x1d : 0));
+    c19 = (byte) (c18 ^ c17);
+    c20 = (byte) ((c18 << 1) ^ ((c18 & 0x80) != 0 ? 0x1d : 0));
+    c21 = (byte) (c20 ^ c17);
+    c22 = (byte) (c20 ^ c18);
+    c23 = (byte) (c20 ^ c19);
+    c24 = (byte) ((c20 << 1) ^ ((c20 & 0x80) != 0 ? 0x1d : 0));
+    c25 = (byte) (c24 ^ c17);
+    c26 = (byte) (c24 ^ c18);
+    c27 = (byte) (c24 ^ c19);
+    c28 = (byte) (c24 ^ c20);
+    c29 = (byte) (c24 ^ c21);
+    c30 = (byte) (c24 ^ c22);
+    c31 = (byte) (c24 ^ c23);
+
+    tbl[offset + 16] = 0;
+    tbl[offset + 17] = c17;
+    tbl[offset + 18] = c18;
+    tbl[offset + 19] = c19;
+    tbl[offset + 20] = c20;
+    tbl[offset + 21] = c21;
+    tbl[offset + 22] = c22;
+    tbl[offset + 23] = c23;
+    tbl[offset + 24] = c24;
+    tbl[offset + 25] = c25;
+    tbl[offset + 26] = c26;
+    tbl[offset + 27] = c27;
+    tbl[offset + 28] = c28;
+    tbl[offset + 29] = c29;
+    tbl[offset + 30] = c30;
+    tbl[offset + 31] = c31;
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GaloisField.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GaloisField.java
new file mode 100644
index 0000000..38a750f
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/GaloisField.java
@@ -0,0 +1,564 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder.util;
+
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Implementation of Galois field arithmetic with 2^p elements. The inputs
+ * must be unsigned integers. It's ported from HDFS-RAID, slightly adapted.
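+ * <p>
+ * A minimal usage sketch (illustrative only):
+ * <pre>
+ *   GaloisField gf = GaloisField.getInstance(); // GF(256), polynomial 285
+ *   int p = gf.multiply(3, 7);
+ *   assert gf.divide(p, 7) == 3; // division inverts multiplication
+ * </pre>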
+ */
+@InterfaceAudience.Private
+public final class GaloisField {
+
+  // Field size 256 is good for byte based system
+  private static final int DEFAULT_FIELD_SIZE = 256;
+  // primitive polynomial 1 + X^2 + X^3 + X^4 + X^8 (substitute 2)
+  private static final int DEFAULT_PRIMITIVE_POLYNOMIAL = 285;
+  private static final Map<Integer, GaloisField> INSTANCES =
+      new HashMap<>();
+  private final int[] logTable;
+  private final int[] powTable;
+  private final int[][] mulTable;
+  private final int[][] divTable;
+  private final int fieldSize;
+  private final int primitivePeriod;
+  private final int primitivePolynomial;
+
+  private GaloisField(int fieldSize, int primitivePolynomial) {
+    assert fieldSize > 0;
+    assert primitivePolynomial > 0;
+
+    this.fieldSize = fieldSize;
+    this.primitivePeriod = fieldSize - 1;
+    this.primitivePolynomial = primitivePolynomial;
+    logTable = new int[fieldSize];
+    powTable = new int[fieldSize];
+    mulTable = new int[fieldSize][fieldSize];
+    divTable = new int[fieldSize][fieldSize];
+    int value = 1;
+    for (int pow = 0; pow < fieldSize - 1; pow++) {
+      powTable[pow] = value;
+      logTable[value] = pow;
+      value = value * 2;
+      if (value >= fieldSize) {
+        value = value ^ primitivePolynomial;
+      }
+    }
+    // building multiplication table
+    for (int i = 0; i < fieldSize; i++) {
+      for (int j = 0; j < fieldSize; j++) {
+        if (i == 0 || j == 0) {
+          mulTable[i][j] = 0;
+          continue;
+        }
+        int z = logTable[i] + logTable[j];
+        z = z >= primitivePeriod ? z - primitivePeriod : z;
+        z = powTable[z];
+        mulTable[i][j] = z;
+      }
+    }
+    // building division table
+    for (int i = 0; i < fieldSize; i++) {
+      for (int j = 1; j < fieldSize; j++) {
+        if (i == 0) {
+          divTable[i][j] = 0;
+          continue;
+        }
+        int z = logTable[i] - logTable[j];
+        z = z < 0 ? z + primitivePeriod : z;
+        z = powTable[z];
+        divTable[i][j] = z;
+      }
+    }
+  }
+
+  /**
+   * Get the object that performs Galois field arithmetic.
+   *
+   * @param fieldSize           size of the field
+   * @param primitivePolynomial a primitive polynomial corresponding to the
+   *                            field size
+   */
+  public static GaloisField getInstance(int fieldSize,
+                                        int primitivePolynomial) {
+    int key = ((fieldSize << 16) & 0xFFFF0000)
+        + (primitivePolynomial & 0x0000FFFF);
+    GaloisField gf;
+    synchronized (INSTANCES) {
+      gf = INSTANCES.get(key);
+      if (gf == null) {
+        gf = new GaloisField(fieldSize, primitivePolynomial);
+        INSTANCES.put(key, gf);
+      }
+    }
+    return gf;
+  }
+
+  /**
+   * Get the object that performs Galois field arithmetic with the default
+   * settings.
+   */
+  public static GaloisField getInstance() {
+    return getInstance(DEFAULT_FIELD_SIZE, DEFAULT_PRIMITIVE_POLYNOMIAL);
+  }
+
+  /**
+   * Return number of elements in the field.
+   *
+   * @return number of elements in the field
+   */
+  public int getFieldSize() {
+    return fieldSize;
+  }
+
+  /**
+   * Return the primitive polynomial in GF(2).
+   *
+   * @return primitive polynomial as an integer
+   */
+  public int getPrimitivePolynomial() {
+    return primitivePolynomial;
+  }
+
+  /**
+   * Compute the sum of two field elements.
+   *
+   * @param x input field
+   * @param y input field
+   * @return result of addition
+   */
+  public int add(int x, int y) {
+    assert (x >= 0 && x < getFieldSize() && y >= 0 && y < getFieldSize());
+    return x ^ y;
+  }
+
+  /**
+   * Compute the multiplication of two field elements.
+   *
+   * @param x input field
+   * @param y input field
+   * @return result of multiplication
+   */
+  public int multiply(int x, int y) {
+    assert (x >= 0 && x < getFieldSize() && y >= 0 && y < getFieldSize());
+    return mulTable[x][y];
+  }
+
+  /**
+   * Compute the division of two field elements.
+   *
+   * @param x input field
+   * @param y input field
+   * @return x/y
+   */
+  public int divide(int x, int y) {
+    assert (x >= 0 && x < getFieldSize() && y > 0 && y < getFieldSize());
+    return divTable[x][y];
+  }
+
+  /**
+   * Compute the n-th power of a field element.
+   *
+   * @param x input field
+   * @param n power
+   * @return x^n
+   */
+  public int power(int x, int n) {
+    assert (x >= 0 && x < getFieldSize());
+    if (n == 0) {
+      return 1;
+    }
+    if (x == 0) {
+      return 0;
+    }
+    x = logTable[x] * n;
+    if (x < primitivePeriod) {
+      return powTable[x];
+    }
+    x = x % primitivePeriod;
+    return powTable[x];
+  }
+
+  /**
+   * Given a Vandermonde matrix V[i][j]=x[j]^i and vector y, solve for z such
+   * that Vz=y. The output z will be placed in y.
+   *
+   * @param x the vector which describes the Vandermonde matrix
+   * @param y right-hand side of the Vandermonde system equation; it will be
+   *          replaced with the output in this vector
+   */
+  public void solveVandermondeSystem(int[] x, int[] y) {
+    solveVandermondeSystem(x, y, x.length);
+  }
+
+  /**
+   * Given a Vandermonde matrix V[i][j]=x[j]^i and vector y, solve for z such
+   * that Vz=y. The output z will be placed in y.
+   *
+   * @param x   the vector which describes the Vandermonde matrix
+   * @param y   right-hand side of the Vandermonde system equation; it will be
+   *            replaced with the output in this vector
+   * @param len consider x and y only from 0...len-1
+   */
+  public void solveVandermondeSystem(int[] x, int[] y, int len) {
+    assert (x.length >= len && y.length >= len);
+    for (int i = 0; i < len - 1; i++) {
+      for (int j = len - 1; j > i; j--) {
+        y[j] = y[j] ^ mulTable[x[i]][y[j - 1]];
+      }
+    }
+    for (int i = len - 1; i >= 0; i--) {
+      for (int j = i + 1; j < len; j++) {
+        y[j] = divTable[y[j]][x[j] ^ x[j - i - 1]];
+      }
+      for (int j = i; j < len - 1; j++) {
+        y[j] = y[j] ^ y[j + 1];
+      }
+    }
+  }
+
+  /**
+   * A "bulk" version to the solving of Vandermonde System.
+   */
+  public void solveVandermondeSystem(int[] x, byte[][] y, int[] outputOffsets,
+                                     int len, int dataLen) {
+    int idx1, idx2;
+    for (int i = 0; i < len - 1; i++) {
+      for (int j = len - 1; j > i; j--) {
+        for (idx2 = outputOffsets[j-1], idx1 = outputOffsets[j];
+             idx1 < outputOffsets[j] + dataLen; idx1++, idx2++) {
+          y[j][idx1] = (byte) (y[j][idx1] ^ mulTable[x[i]][y[j - 1][idx2] &
+              0x000000FF]);
+        }
+      }
+    }
+    for (int i = len - 1; i >= 0; i--) {
+      for (int j = i + 1; j < len; j++) {
+        for (idx1 = outputOffsets[j];
+             idx1 < outputOffsets[j] + dataLen; idx1++) {
+          y[j][idx1] = (byte) (divTable[y[j][idx1] & 0x000000FF][x[j] ^
+              x[j - i - 1]]);
+        }
+      }
+      for (int j = i; j < len - 1; j++) {
+        for (idx2 = outputOffsets[j+1], idx1 = outputOffsets[j];
+             idx1 < outputOffsets[j] + dataLen; idx1++, idx2++) {
+          y[j][idx1] = (byte) (y[j][idx1] ^ y[j + 1][idx2]);
+        }
+      }
+    }
+  }
+
+  /**
+   * A "bulk" version of the solveVandermondeSystem, using ByteBuffer.
+   */
+  public void solveVandermondeSystem(int[] x, ByteBuffer[] y, int len) {
+    ByteBuffer p;
+    int idx1, idx2;
+    for (int i = 0; i < len - 1; i++) {
+      for (int j = len - 1; j > i; j--) {
+        p = y[j];
+        for (idx1 = p.position(), idx2 = y[j-1].position();
+             idx1 < p.limit(); idx1++, idx2++) {
+          p.put(idx1, (byte) (p.get(idx1) ^ mulTable[x[i]][y[j-1].get(idx2) &
+              0x000000FF]));
+        }
+      }
+    }
+
+    for (int i = len - 1; i >= 0; i--) {
+      for (int j = i + 1; j < len; j++) {
+        p = y[j];
+        for (idx1 = p.position(); idx1 < p.limit(); idx1++) {
+          p.put(idx1, (byte) (divTable[p.get(idx1) &
+              0x000000FF][x[j] ^ x[j - i - 1]]));
+        }
+      }
+
+      for (int j = i; j < len - 1; j++) {
+        p = y[j];
+        for (idx1 = p.position(), idx2 = y[j+1].position();
+             idx1 < p.limit(); idx1++, idx2++) {
+          p.put(idx1, (byte) (p.get(idx1) ^ y[j+1].get(idx2)));
+        }
+      }
+    }
+  }
+
+  /**
+   * Compute the multiplication of two polynomials. The index in the array
+   * corresponds to the power of the entry. For example p[0] is the constant
+   * term of the polynomial p.
+   *
+   * @param p input polynomial
+   * @param q input polynomial
+   * @return polynomial representing p*q
+   */
+  public int[] multiply(int[] p, int[] q) {
+    int len = p.length + q.length - 1;
+    int[] result = new int[len];
+    // A new int[] is already zero-filled in Java, so no explicit clearing
+    // is needed here.
+    for (int i = 0; i < p.length; i++) {
+      for (int j = 0; j < q.length; j++) {
+        result[i + j] = add(result[i + j], multiply(p[i], q[j]));
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Compute the remainder of a dividend and divisor pair. The index in the
+   * array corresponds to the power of the entry. For example p[0] is the
+   * constant term of the polynomial p.
+   *
+   * @param dividend dividend polynomial; the remainder will be placed
+   *                 here upon return
+   * @param divisor  divisor polynomial
+   */
+  public void remainder(int[] dividend, int[] divisor) {
+    for (int i = dividend.length - divisor.length; i >= 0; i--) {
+      int ratio = divTable[dividend[i +
+          divisor.length - 1]][divisor[divisor.length - 1]];
+      for (int j = 0; j < divisor.length; j++) {
+        int k = j + i;
+        dividend[k] = dividend[k] ^ mulTable[ratio][divisor[j]];
+      }
+    }
+  }
+
+  /**
+   * Compute the sum of two polynomials. The index in the array corresponds to
+   * the power of the entry. For example p[0] is the constant term of the
+   * polynomial p.
+   *
+   * @param p input polynomial
+   * @param q input polynomial
+   * @return polynomial representing p+q
+   */
+  public int[] add(int[] p, int[] q) {
+    int len = Math.max(p.length, q.length);
+    int[] result = new int[len];
+    for (int i = 0; i < len; i++) {
+      if (i < p.length && i < q.length) {
+        result[i] = add(p[i], q[i]);
+      } else if (i < p.length) {
+        result[i] = p[i];
+      } else {
+        result[i] = q[i];
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Substitute x into polynomial p(x).
+   *
+   * @param p input polynomial
+   * @param x input field
+   * @return p(x)
+   */
+  public int substitute(int[] p, int x) {
+    int result = 0;
+    int y = 1;
+    for (int i = 0; i < p.length; i++) {
+      result = result ^ mulTable[p[i]][y];
+      y = mulTable[x][y];
+    }
+    return result;
+  }
+
+  /**
+   * A "bulk" version of the substitute.
+   * Tends to be 2X faster than the "int" substitute in a loop.
+   *
+   * @param p input polynomial
+   * @param q store the return result
+   * @param x input field
+   */
+  public void substitute(byte[][] p, byte[] q, int x) {
+    int y = 1;
+    for (int i = 0; i < p.length; i++) {
+      byte[] pi = p[i];
+      for (int j = 0; j < pi.length; j++) {
+        int pij = pi[j] & 0x000000FF;
+        q[j] = (byte) (q[j] ^ mulTable[pij][y]);
+      }
+      y = mulTable[x][y];
+    }
+  }
+
+  /**
+   * A "bulk" version of the substitute.
+   * Tends to be 2X faster than the "int" substitute in a loop.
+   *
+   * @param p input polynomial
+   * @param offsets start offsets into each input array
+   * @param len number of bytes to process from each offset
+   * @param q store the return result
+   * @param offset start offset into q for the result
+   * @param x input field
+   */
+  public void substitute(byte[][] p, int[] offsets,
+                         int len, byte[] q, int offset, int x) {
+    int y = 1, iIdx, oIdx;
+    for (int i = 0; i < p.length; i++) {
+      byte[] pi = p[i];
+      for (iIdx = offsets[i], oIdx = offset;
+           iIdx < offsets[i] + len; iIdx++, oIdx++) {
+        int pij = pi != null ? pi[iIdx] & 0x000000FF : 0;
+        q[oIdx] = (byte) (q[oIdx] ^ mulTable[pij][y]);
+      }
+      y = mulTable[x][y];
+    }
+  }
+
+  /**
+   * A "bulk" version of the substitute, using ByteBuffer.
+   * Tends to be 2X faster than the "int" substitute in a loop.
+   *
+   * @param p input polynomial
+   * @param q store the return result
+   * @param x input field
+   */
+  public void substitute(ByteBuffer[] p, int len, ByteBuffer q, int x) {
+    int y = 1, iIdx, oIdx;
+    for (int i = 0; i < p.length; i++) {
+      ByteBuffer pi = p[i];
+      int pos = pi != null ? pi.position() : 0;
+      int limit = pi != null ? pi.limit() : len;
+      for (oIdx = q.position(), iIdx = pos;
+           iIdx < limit; iIdx++, oIdx++) {
+        int pij = pi != null ? pi.get(iIdx) & 0x000000FF : 0;
+        q.put(oIdx, (byte) (q.get(oIdx) ^ mulTable[pij][y]));
+      }
+      y = mulTable[x][y];
+    }
+  }
+
+  /**
+   * The "bulk" version of the remainder.
+   * Warning: This function will modify the "dividend" inputs.
+   */
+  public void remainder(byte[][] dividend, int[] divisor) {
+    for (int i = dividend.length - divisor.length; i >= 0; i--) {
+      for (int j = 0; j < divisor.length; j++) {
+        for (int k = 0; k < dividend[i].length; k++) {
+          int ratio = divTable[dividend[i + divisor.length - 1][k] &
+              0x00FF][divisor[divisor.length - 1]];
+          dividend[j + i][k] = (byte) ((dividend[j + i][k] & 0x00FF) ^
+              mulTable[ratio][divisor[j]]);
+        }
+      }
+    }
+  }
+
+  /**
+   * The "bulk" version of the remainder.
+   * Warning: This function will modify the "dividend" inputs.
+   */
+  public void remainder(byte[][] dividend, int[] offsets,
+                        int len, int[] divisor) {
+    int idx1, idx2;
+    for (int i = dividend.length - divisor.length; i >= 0; i--) {
+      for (int j = 0; j < divisor.length; j++) {
+        for (idx2 = offsets[j + i], idx1 = offsets[i + divisor.length - 1];
+             idx1 < offsets[i + divisor.length - 1] + len;
+             idx1++, idx2++) {
+          int ratio = divTable[dividend[i + divisor.length - 1][idx1] &
+              0x00FF][divisor[divisor.length - 1]];
+          dividend[j + i][idx2] = (byte) ((dividend[j + i][idx2] & 0x00FF) ^
+              mulTable[ratio][divisor[j]]);
+        }
+      }
+    }
+  }
+
+  /**
+   * The "bulk" version of the remainder, using ByteBuffer.
+   * Warning: This function will modify the "dividend" inputs.
+   */
+  public void remainder(ByteBuffer[] dividend, int[] divisor) {
+    int idx1, idx2;
+    ByteBuffer b1, b2;
+    for (int i = dividend.length - divisor.length; i >= 0; i--) {
+      for (int j = 0; j < divisor.length; j++) {
+        b1 = dividend[i + divisor.length - 1];
+        b2 = dividend[j + i];
+        for (idx1 = b1.position(), idx2 = b2.position();
+             idx1 < b1.limit(); idx1++, idx2++) {
+          int ratio = divTable[b1.get(idx1) &
+              0x00FF][divisor[divisor.length - 1]];
+          b2.put(idx2, (byte) ((b2.get(idx2) & 0x00FF) ^
+              mulTable[ratio][divisor[j]]));
+        }
+      }
+    }
+  }
+
+  /**
+   * Perform Gaussian elimination on the given matrix. This matrix has to be a
+   * fat matrix (number of rows &lt; number of columns).
+   */
+  public void gaussianElimination(int[][] matrix) {
+    assert(matrix != null && matrix.length > 0 && matrix[0].length > 0
+        && matrix.length < matrix[0].length);
+    int height = matrix.length;
+    int width = matrix[0].length;
+    for (int i = 0; i < height; i++) {
+      boolean pivotFound = false;
+      // scan the column for a nonzero pivot and swap it to the diagonal
+      for (int j = i; j < height; j++) {
+        if (matrix[i][j] != 0) {
+          int[] tmp = matrix[i];
+          matrix[i] = matrix[j];
+          matrix[j] = tmp;
+          pivotFound = true;
+          break;
+        }
+      }
+      if (!pivotFound) {
+        continue;
+      }
+      int pivot = matrix[i][i];
+      for (int j = i; j < width; j++) {
+        matrix[i][j] = divide(matrix[i][j], pivot);
+      }
+      for (int j = i + 1; j < height; j++) {
+        int lead = matrix[j][i];
+        for (int k = i; k < width; k++) {
+          matrix[j][k] = add(matrix[j][k], multiply(lead, matrix[i][k]));
+        }
+      }
+    }
+    for (int i = height - 1; i >= 0; i--) {
+      for (int j = 0; j < i; j++) {
+        int lead = matrix[j][i];
+        for (int k = i; k < width; k++) {
+          matrix[j][k] = add(matrix[j][k], multiply(lead, matrix[i][k]));
+        }
+      }
+    }
+  }
+
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java
new file mode 100644
index 0000000..6d23c1f
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/RSUtil.java
@@ -0,0 +1,187 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Utilities for implementing Reed-Solomon code, used by the RS coder. Some of
+ * the code is borrowed from the ISA-L implementation (C or ASM code).
+ */
+@InterfaceAudience.Private
+public final class RSUtil {
+
+  private RSUtil(){}
+
+  // We always use the byte system (with symbol size 8, field size 256,
+  // primitive polynomial 285, and primitive root 2).
+  public static final GaloisField GF = GaloisField.getInstance();
+  public static final int PRIMITIVE_ROOT = 2;
+
+  public static int[] getPrimitivePower(int numDataUnits, int numParityUnits) {
+    int[] primitivePower = new int[numDataUnits + numParityUnits];
+    // compute powers of the primitive root
+    for (int i = 0; i < numDataUnits + numParityUnits; i++) {
+      primitivePower[i] = GF.power(PRIMITIVE_ROOT, i);
+    }
+    return primitivePower;
+  }
+
+  public static void initTables(int k, int rows, byte[] codingMatrix,
+      int matrixOffset, byte[] gfTables) {
+    int i, j;
+
+    int offset = 0, idx = matrixOffset;
+    for (i = 0; i < rows; i++) {
+      for (j = 0; j < k; j++) {
+        GF256.gfVectMulInit(codingMatrix[idx++], gfTables, offset);
+        offset += 32;
+      }
+    }
+  }
+
+  /**
+   * Ported from Intel ISA-L library.
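+   *
+   * Fills {@code a} with a flattened m-by-k encode matrix: the top k rows
+   * form the identity, and each remaining row i holds 1/(i ^ j) at column j,
+   * where addition in GF(2^8) is XOR.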
+   */
+  public static void genCauchyMatrix(byte[] a, int m, int k) {
+    // Identity matrix in high position
+    for (int i = 0; i < k; i++) {
+      a[k * i + i] = 1;
+    }
+
+    // For the rest choose 1/(i + j) | i != j
+    int pos = k * k;
+    for (int i = k; i < m; i++) {
+      for (int j = 0; j < k; j++) {
+        a[pos++] = GF256.gfInv((byte) (i ^ j));
+      }
+    }
+  }
+
+  /**
+   * Encode a group of input data units and generate the outputs. It's also
+   * used for decoding because, in this implementation, encoding and decoding
+   * are unified.
+   *
+   * The algorithm is ported from the Intel ISA-L library for compatibility.
+   * It leverages Java auto-vectorization support for performance.
+   */
+  public static void encodeData(byte[] gfTables, int dataLen, byte[][] inputs,
+      int[] inputOffsets, byte[][] outputs,
+      int[] outputOffsets) {
+    int numInputs = inputs.length;
+    int numOutputs = outputs.length;
+    int l, i, j, iPos, oPos;
+    byte[] input, output;
+    byte s;
+    final int times = dataLen / 8;
+    final int extra = dataLen - dataLen % 8;
+    byte[] tableLine;
+
+    for (l = 0; l < numOutputs; l++) {
+      output = outputs[l];
+
+      for (j = 0; j < numInputs; j++) {
+        input = inputs[j];
+        iPos = inputOffsets[j];
+        oPos = outputOffsets[l];
+
+        s = gfTables[j * 32 + l * numInputs * 32 + 1];
+        tableLine = GF256.gfMulTab()[s & 0xff];
+
+        /*
+         * Purely for performance, assuming we can use 8 bytes per SIMD
+         * instruction. Subject to improvement.
+         */
+        for (i = 0; i < times; i++, iPos += 8, oPos += 8) {
+          output[oPos + 0] ^= tableLine[0xff & input[iPos + 0]];
+          output[oPos + 1] ^= tableLine[0xff & input[iPos + 1]];
+          output[oPos + 2] ^= tableLine[0xff & input[iPos + 2]];
+          output[oPos + 3] ^= tableLine[0xff & input[iPos + 3]];
+          output[oPos + 4] ^= tableLine[0xff & input[iPos + 4]];
+          output[oPos + 5] ^= tableLine[0xff & input[iPos + 5]];
+          output[oPos + 6] ^= tableLine[0xff & input[iPos + 6]];
+          output[oPos + 7] ^= tableLine[0xff & input[iPos + 7]];
+        }
+
+        // Process the remaining bytes one by one.
+        for (i = extra; i < dataLen; i++, iPos++, oPos++) {
+          output[oPos] ^= tableLine[0xff & input[iPos]];
+        }
+      }
+    }
+  }
+
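+  /*
+   * A hedged end-to-end sketch of how these utilities fit together (the
+   * variable names below are illustrative, not from this class):
+   *
+   *   int k = 6, p = 3;                          // data and parity counts
+   *   byte[] matrix = new byte[(k + p) * k];
+   *   genCauchyMatrix(matrix, k + p, k);         // identity + Cauchy rows
+   *   byte[] gfTables = new byte[p * k * 32];
+   *   initTables(k, p, matrix, k * k, gfTables); // skip the identity rows
+   *   encodeData(gfTables, cellSize, data, dataOffsets,
+   *       parity, parityOffsets);
+   */
+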
+  /**
+   * See above. Try to use the byte[] version when possible.
+   */
+  public static void encodeData(byte[] gfTables, ByteBuffer[] inputs,
+      ByteBuffer[] outputs) {
+    int numInputs = inputs.length;
+    int numOutputs = outputs.length;
+    int dataLen = inputs[0].remaining();
+    int l, i, j, iPos, oPos;
+    ByteBuffer input, output;
+    byte s;
+    final int times = dataLen / 8;
+    final int extra = dataLen - dataLen % 8;
+    byte[] tableLine;
+
+    for (l = 0; l < numOutputs; l++) {
+      output = outputs[l];
+
+      for (j = 0; j < numInputs; j++) {
+        input = inputs[j];
+        iPos = input.position();
+        oPos = output.position();
+
+        s = gfTables[j * 32 + l * numInputs * 32 + 1];
+        tableLine = GF256.gfMulTab()[s & 0xff];
+
+        for (i = 0; i < times; i++, iPos += 8, oPos += 8) {
+          output.put(oPos + 0, (byte) (output.get(oPos + 0) ^
+              tableLine[0xff & input.get(iPos + 0)]));
+          output.put(oPos + 1, (byte) (output.get(oPos + 1) ^
+              tableLine[0xff & input.get(iPos + 1)]));
+          output.put(oPos + 2, (byte) (output.get(oPos + 2) ^
+              tableLine[0xff & input.get(iPos + 2)]));
+          output.put(oPos + 3, (byte) (output.get(oPos + 3) ^
+              tableLine[0xff & input.get(iPos + 3)]));
+          output.put(oPos + 4, (byte) (output.get(oPos + 4) ^
+              tableLine[0xff & input.get(iPos + 4)]));
+          output.put(oPos + 5, (byte) (output.get(oPos + 5) ^
+              tableLine[0xff & input.get(iPos + 5)]));
+          output.put(oPos + 6, (byte) (output.get(oPos + 6) ^
+              tableLine[0xff & input.get(iPos + 6)]));
+          output.put(oPos + 7, (byte) (output.get(oPos + 7) ^
+              tableLine[0xff & input.get(iPos + 7)]));
+        }
+
+        for (i = extra; i < dataLen; i++, iPos++, oPos++) {
+          output.put(oPos, (byte) (output.get(oPos) ^
+              tableLine[0xff & input.get(iPos)]));
+        }
+      }
+    }
+  }
+
+}
diff --git a/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/package-info.java b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/package-info.java
new file mode 100644
index 0000000..5e6fac4
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/java/org/apache/ozone/erasurecode/rawcoder/util/package-info.java
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * General helpers for implementing raw erasure coders.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.ozone.erasurecode.rawcoder.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file
diff --git a/hadoop-hdds/erasurecode/src/main/resources/META-INF/services/org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory b/hadoop-hdds/erasurecode/src/main/resources/META-INF/services/org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory
new file mode 100644
index 0000000..c27b0fa
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/main/resources/META-INF/services/org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory
@@ -0,0 +1,15 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.ozone.erasurecode.rawcoder.RSRawErasureCoderFactory
+org.apache.ozone.erasurecode.rawcoder.XORRawErasureCoderFactory
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/DumpUtil.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/DumpUtil.java
new file mode 100644
index 0000000..fbbf4f6
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/DumpUtil.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode;
+
+
+/**
+ * A dump utility class for debugging data erasure coding/decoding issues.
+ * It is not intended for use in production code.
+ */
+public final class DumpUtil {
+  private static final String HEX_CHARS_STR = "0123456789ABCDEF";
+  private static final char[] HEX_CHARS = HEX_CHARS_STR.toCharArray();
+
+  private DumpUtil() {
+    // Never instantiated.
+  }
+
+  /**
+   * Convert bytes into a hex format like "0x02 02 00 80".
+   * If limit is non-positive or too large, then all bytes will be converted.
+   */
+  public static String bytesToHex(byte[] bytes, int limit) {
+    if (limit <= 0 || limit > bytes.length) {
+      limit = bytes.length;
+    }
+    int len = limit * 2;
+    len += limit; // for ' ' appended for each char
+    len += 2; // for '0x' prefix
+    char[] hexChars = new char[len];
+    hexChars[0] = '0';
+    hexChars[1] = 'x';
+    for (int j = 0; j < limit; j++) {
+      int v = bytes[j] & 0xFF;
+      hexChars[j * 3 + 2] = HEX_CHARS[v >>> 4];
+      hexChars[j * 3 + 3] = HEX_CHARS[v & 0x0F];
+      hexChars[j * 3 + 4] = ' ';
+    }
+
+    return new String(hexChars);
+  }
+
+  public static void dumpMatrix(byte[] matrix,
+                                int numDataUnits, int numAllUnits) {
+    for (int i = 0; i < numDataUnits; i++) {
+      for (int j = 0; j < numAllUnits; j++) {
+        System.out.print(" ");
+        System.out.print(0xff & matrix[i * numAllUnits + j]);
+      }
+      System.out.println();
+    }
+  }
+
+  /**
+   * Print data in hex format in an array of chunks.
+   * @param header a header line to print before the chunks
+   * @param chunks the chunks to dump
+   */
+  public static void dumpChunks(String header, ECChunk[] chunks) {
+    System.out.println();
+    System.out.println(header);
+    for (int i = 0; i < chunks.length; i++) {
+      dumpChunk(chunks[i]);
+    }
+    System.out.println();
+  }
+
+  /**
+   * Print data in hex format in a chunk.
+   * @param chunk the chunk to dump
+   */
+  public static void dumpChunk(ECChunk chunk) {
+    String str;
+    if (chunk == null) {
+      str = "<EMPTY>";
+    } else {
+      byte[] bytes = chunk.toBytesArray();
+      str = DumpUtil.bytesToHex(bytes, 16);
+    }
+    System.out.println(str);
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
new file mode 100644
index 0000000..8ac3ac3
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.ozone.erasurecode.rawcoder.RSRawErasureCoderFactory;
+import org.apache.ozone.erasurecode.rawcoder.RawErasureCoderFactory;
+import org.apache.ozone.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.ozone.erasurecode.rawcoder.RawErasureEncoder;
+import org.apache.ozone.erasurecode.rawcoder.XORRawErasureCoderFactory;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test CodecRegistry.
+ */
+public class TestCodecRegistry {
+  @Test
+  public void testGetCodecs() {
+    Set<String> codecs = CodecRegistry.getInstance().getCodecNames();
+    assertEquals(2, codecs.size());
+    assertTrue(
+        codecs.contains(ECReplicationConfig.EcCodec.RS.name().toLowerCase()));
+    assertTrue(
+        codecs.contains(ECReplicationConfig.EcCodec.XOR.name().toLowerCase()));
+  }
+
+  @Test
+  public void testGetCoders() {
+    List<RawErasureCoderFactory> coders = CodecRegistry.getInstance().
+        getCoders(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
+    assertEquals(1, coders.size());
+    assertTrue(coders.get(0) instanceof RSRawErasureCoderFactory);
+
+    coders = CodecRegistry.getInstance().
+        getCoders(ECReplicationConfig.EcCodec.XOR.name().toLowerCase());
+    assertEquals(1, coders.size());
+    assertTrue(coders.get(0) instanceof XORRawErasureCoderFactory);
+  }
+
+  @Test
+  public void testGetCodersWrong() {
+    List<RawErasureCoderFactory> coders =
+        CodecRegistry.getInstance().getCoders("WRONG_CODEC");
+    assertNull(coders);
+  }
+
+  @Test
+  public void testGetCoderByNameWrong() {
+    RawErasureCoderFactory coder = CodecRegistry.getInstance().
+        getCoderByName(ECReplicationConfig.EcCodec.RS.name().toLowerCase(),
+            "WRONG_RS");
+    assertNull(coder);
+  }
+
+  @Test
+  public void testUpdateCoders() {
+    class RSUserDefinedIncorrectFactory implements RawErasureCoderFactory {
+      public RawErasureEncoder createEncoder(ECReplicationConfig coderOptions) {
+        return null;
+      }
+
+      public RawErasureDecoder createDecoder(ECReplicationConfig coderOptions) {
+        return null;
+      }
+
+      public String getCoderName() {
+        return "rs_java";
+      }
+
+      public String getCodecName() {
+        return ECReplicationConfig.EcCodec.RS.name().toLowerCase();
+      }
+    }
+
+    List<RawErasureCoderFactory> userDefinedFactories = new ArrayList<>();
+    userDefinedFactories.add(new RSUserDefinedIncorrectFactory());
+    CodecRegistry.getInstance().updateCoders(userDefinedFactories);
+
+    // check RS coders
+    List<RawErasureCoderFactory> rsCoders = CodecRegistry.getInstance().
+        getCoders(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
+    assertEquals(1, rsCoders.size());
+    assertTrue(rsCoders.get(0) instanceof RSRawErasureCoderFactory);
+
+    // check RS coder names
+    String[] rsCoderNames = CodecRegistry.getInstance().
+        getCoderNames(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
+    assertEquals(1, rsCoderNames.length);
+    assertEquals(RSRawErasureCoderFactory.CODER_NAME, rsCoderNames[0]);
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCoderBase.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCoderBase.java
new file mode 100644
index 0000000..e4b02cd
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCoderBase.java
@@ -0,0 +1,524 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Random;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Test base providing common utilities for testing not only raw coders but
+ * also block coders.
+ */
+@SuppressWarnings({"checkstyle:VisibilityModifier", "checkstyle:HiddenField"})
+public abstract class TestCoderBase {
+  protected static final Random RAND = new Random();
+  private static int fixedDataGenerator = 0;
+  protected boolean allowDump = true;
+  protected int numDataUnits;
+  protected int numParityUnits;
+  protected int baseChunkSize = 1024;
+  // Indexes of erased data units.
+  protected int[] erasedDataIndexes = new int[]{0};
+  // Indexes of erased parity units.
+  protected int[] erasedParityIndexes = new int[]{0};
+  // Data buffers are either direct or on-heap; for performance, the two cases
+  // may go to different coding implementations.
+  protected boolean usingDirectBuffer = true;
+  protected boolean usingFixedData = true;
+  protected byte[][] fixedData;
+  private int chunkSize = baseChunkSize;
+  private BufferAllocator allocator;
+  private byte[] zeroChunkBytes;
+  private boolean startBufferWithZero = true;
+  // Using this, the generated data can be repeated across multiple calls to
+  // encode(), which helps troubleshooting.
+  private ConfigurationSource conf;
+
+  protected int getChunkSize() {
+    return chunkSize;
+  }
+
+  protected void setChunkSize(int chunkSize) {
+    this.chunkSize = chunkSize;
+    this.zeroChunkBytes = new byte[chunkSize]; // With ZERO by default
+  }
+
+  protected byte[] getZeroChunkBytes() {
+    return zeroChunkBytes;
+  }
+
+  protected void prepareBufferAllocator(boolean usingSlicedBuffer) {
+    if (usingSlicedBuffer) {
+      int roughEstimationSpace =
+          chunkSize * (numDataUnits + numParityUnits) * 10;
+      allocator = new BufferAllocator.SlicedBufferAllocator(usingDirectBuffer,
+          roughEstimationSpace);
+    } else {
+      allocator = new BufferAllocator.SimpleBufferAllocator(usingDirectBuffer);
+    }
+  }
+
+  protected boolean isAllowDump() {
+    return allowDump;
+  }
+
+  /**
+   * Prepare before running the case.
+   *
+   * @param usingFixedData whether to use fixed, pre-generated data for the
+   *                       test instead of generating random data
+   */
+  protected void prepare(ConfigurationSource conf,
+      int numDataUnits,
+      int numParityUnits,
+      int[] erasedDataIndexes,
+      int[] erasedParityIndexes,
+      boolean usingFixedData) {
+    this.conf = conf != null ? conf : new OzoneConfiguration();
+    this.numDataUnits = numDataUnits;
+    this.numParityUnits = numParityUnits;
+    this.erasedDataIndexes = erasedDataIndexes != null ?
+        erasedDataIndexes : new int[]{0};
+    this.erasedParityIndexes = erasedParityIndexes != null ?
+        erasedParityIndexes : new int[]{0};
+    this.usingFixedData = usingFixedData;
+    if (usingFixedData) {
+      prepareFixedData();
+    }
+  }
+
+  /**
+   * Prepare before running the case.
+   *
+   * @param conf
+   * @param numDataUnits
+   * @param numParityUnits
+   * @param erasedDataIndexes
+   * @param erasedParityIndexes
+   */
+  protected void prepare(ConfigurationSource conf, int numDataUnits,
+      int numParityUnits, int[] erasedDataIndexes,
+      int[] erasedParityIndexes) {
+    prepare(conf, numDataUnits, numParityUnits, erasedDataIndexes,
+        erasedParityIndexes, false);
+  }
+
+  /**
+   * Prepare before running the case.
+   *
+   * @param numDataUnits
+   * @param numParityUnits
+   * @param erasedDataIndexes
+   * @param erasedParityIndexes
+   */
+  protected void prepare(
+      int numDataUnits,
+      int numParityUnits,
+      int[] erasedDataIndexes,
+      int[] erasedParityIndexes) {
+    prepare(null, numDataUnits, numParityUnits, erasedDataIndexes,
+        erasedParityIndexes, false);
+  }
+
+  /**
+   * Get the conf used by the test.
+   *
+   * @return configuration
+   */
+  protected ConfigurationSource getConf() {
+    return this.conf;
+  }
+
+  /**
+   * Compare and verify if erased chunks are equal to recovered chunks.
+   *
+   * @param erasedChunks
+   * @param recoveredChunks
+   */
+  protected void compareAndVerify(ECChunk[] erasedChunks,
+      ECChunk[] recoveredChunks) {
+    byte[][] erased = toArrays(erasedChunks);
+    byte[][] recovered = toArrays(recoveredChunks);
+    assertTrue("Decoding and comparing failed.",
+        Arrays.deepEquals(erased, recovered));
+  }
+
+  /**
+   * Adjust and return erased indexes altogether, including erased data indexes
+   * and parity indexes.
+   * @return erased indexes altogether
+   */
+  protected int[] getErasedIndexesForDecoding() {
+    int[] erasedIndexesForDecoding =
+        new int[erasedDataIndexes.length + erasedParityIndexes.length];
+
+    int idx = 0;
+
+    for (int i = 0; i < erasedDataIndexes.length; i++) {
+      erasedIndexesForDecoding[idx++] = erasedDataIndexes[i];
+    }
+
+    for (int i = 0; i < erasedParityIndexes.length; i++) {
+      erasedIndexesForDecoding[idx++] = erasedParityIndexes[i] + numDataUnits;
+    }
+
+    return erasedIndexesForDecoding;
+  }
+
+  /**
+   * Return input chunks for decoding, which is dataChunks + parityChunks.
+   *
+   * @param dataChunks
+   * @param parityChunks
+   * @return
+   */
+  protected ECChunk[] prepareInputChunksForDecoding(ECChunk[] dataChunks,
+      ECChunk[] parityChunks) {
+    ECChunk[] inputChunks = new ECChunk[numDataUnits + numParityUnits];
+
+    int idx = 0;
+
+    for (int i = 0; i < numDataUnits; i++) {
+      inputChunks[idx++] = dataChunks[i];
+    }
+
+    for (int i = 0; i < numParityUnits; i++) {
+      inputChunks[idx++] = parityChunks[i];
+    }
+
+    return inputChunks;
+  }
+
+  /**
+   * Erase some data chunks to test recovering them. As they're erased,
+   * we don't need to read them and will not have the buffers at all, so just
+   * set them as null.
+   *
+   * @param dataChunks
+   * @param parityChunks
+   * @return clone of erased chunks
+   */
+  protected ECChunk[] backupAndEraseChunks(ECChunk[] dataChunks,
+      ECChunk[] parityChunks) {
+    ECChunk[] toEraseChunks = new ECChunk[erasedDataIndexes.length +
+        erasedParityIndexes.length];
+
+    int idx = 0;
+
+    for (int i = 0; i < erasedDataIndexes.length; i++) {
+      toEraseChunks[idx++] = dataChunks[erasedDataIndexes[i]];
+      dataChunks[erasedDataIndexes[i]] = null;
+    }
+
+    for (int i = 0; i < erasedParityIndexes.length; i++) {
+      toEraseChunks[idx++] = parityChunks[erasedParityIndexes[i]];
+      parityChunks[erasedParityIndexes[i]] = null;
+    }
+
+    return toEraseChunks;
+  }
+
+  /**
+   * Erase data from the specified chunks, just setting them as null.
+   * @param chunks
+   */
+  protected void eraseDataFromChunks(ECChunk[] chunks) {
+    for (int i = 0; i < chunks.length; i++) {
+      chunks[i] = null;
+    }
+  }
+
+  protected void markChunks(ECChunk[] chunks) {
+    for (int i = 0; i < chunks.length; i++) {
+      if (chunks[i] != null) {
+        chunks[i].getBuffer().mark();
+      }
+    }
+  }
+
+  protected void restoreChunksFromMark(ECChunk[] chunks) {
+    for (int i = 0; i < chunks.length; i++) {
+      if (chunks[i] != null) {
+        chunks[i].getBuffer().reset();
+      }
+    }
+  }
+
+  /**
+   * Clone chunks along with copying the associated data. It respects how the
+   * chunk buffer is allocated, direct or non-direct. It avoids affecting the
+   * original chunk buffers.
+   * @param chunks
+   * @return
+   */
+  protected ECChunk[] cloneChunksWithData(ECChunk[] chunks) {
+    ECChunk[] results = new ECChunk[chunks.length];
+    for (int i = 0; i < chunks.length; i++) {
+      results[i] = cloneChunkWithData(chunks[i]);
+    }
+
+    return results;
+  }
+
+  /**
+   * Clone chunk along with copying the associated data. It respects how the
+   * chunk buffer is allocated, direct or non-direct. It avoids affecting the
+   * original chunk.
+   * @param chunk
+   * @return a new chunk
+   */
+  protected ECChunk cloneChunkWithData(ECChunk chunk) {
+    if (chunk == null) {
+      return null;
+    }
+
+    ByteBuffer srcBuffer = chunk.getBuffer();
+
+    byte[] bytesArr = new byte[srcBuffer.remaining()];
+    srcBuffer.mark();
+    srcBuffer.get(bytesArr, 0, bytesArr.length);
+    srcBuffer.reset();
+
+    ByteBuffer destBuffer = allocateOutputBuffer(bytesArr.length);
+    int pos = destBuffer.position();
+    destBuffer.put(bytesArr);
+    destBuffer.flip();
+    destBuffer.position(pos);
+
+    return new ECChunk(destBuffer);
+  }
+
+  /**
+   * Allocate a chunk for output or writing.
+   * @return
+   */
+  protected ECChunk allocateOutputChunk() {
+    ByteBuffer buffer = allocateOutputBuffer(chunkSize);
+
+    return new ECChunk(buffer);
+  }
+
+  /**
+   * Allocate a buffer for output or writing. It can prepare for two kinds of
+   * data buffers: one with position 0, the other with position > 0.
+   * @return a buffer ready to write chunkSize bytes from current position
+   */
+  protected ByteBuffer allocateOutputBuffer(int bufferLen) {
+    /*
+     * When startBufferWithZero is true, the buffer starts at position 0:
+     *   ---------------
+     * otherwise dummy data is prefixed, to simulate a buffer with
+     * position > 0:
+     *   ___TO--BE--WRITTEN___
+     */
+    int startOffset = startBufferWithZero ? 0 : 11; // 11 is arbitrary
+    int allocLen = startOffset + bufferLen + startOffset;
+    ByteBuffer buffer = allocator.allocate(allocLen);
+    buffer.limit(startOffset + bufferLen);
+    fillDummyData(buffer, startOffset);
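+    // Worked example (a sketch): with bufferLen = 100 and
+    // startBufferWithZero == false, startOffset = 11 and allocLen = 122;
+    // the limit is set to 111 and, after fillDummyData, the position is 11,
+    // leaving exactly 100 writable bytes. The flag below then flips the
+    // layout for the next allocation.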
+    startBufferWithZero = !startBufferWithZero;
+
+    return buffer;
+  }
+
+  /**
+   * Prepare data chunks for each data unit by generating random data.
+   * @return data chunks ready for encoding
+   */
+  protected ECChunk[] prepareDataChunksForEncoding() {
+    if (usingFixedData) {
+      ECChunk[] chunks = new ECChunk[numDataUnits];
+      for (int i = 0; i < chunks.length; i++) {
+        chunks[i] = makeChunkUsingData(fixedData[i]);
+      }
+      return chunks;
+    }
+
+    return generateDataChunks();
+  }
+
+  private ECChunk makeChunkUsingData(byte[] data) {
+    ECChunk chunk = allocateOutputChunk();
+    ByteBuffer buffer = chunk.getBuffer();
+    int pos = buffer.position();
+    buffer.put(data, 0, chunkSize);
+    buffer.flip();
+    buffer.position(pos);
+
+    return chunk;
+  }
+
+  private ECChunk[] generateDataChunks() {
+    ECChunk[] chunks = new ECChunk[numDataUnits];
+    for (int i = 0; i < chunks.length; i++) {
+      chunks[i] = generateDataChunk();
+    }
+
+    return chunks;
+  }
+
+  private void prepareFixedData() {
+    // We may load test data from a resource, or just generate randomly.
+    // The generated data will be used across subsequent encode/decode calls.
+    this.fixedData = new byte[numDataUnits][];
+    for (int i = 0; i < numDataUnits; i++) {
+      fixedData[i] = generateFixedData(baseChunkSize * 2);
+    }
+  }
+
+  /**
+   * Generate a data chunk filled with random data.
+   * @return a freshly generated data chunk
+   */
+  protected ECChunk generateDataChunk() {
+    ByteBuffer buffer = allocateOutputBuffer(chunkSize);
+    int pos = buffer.position();
+    buffer.put(generateData(chunkSize));
+    buffer.flip();
+    buffer.position(pos);
+
+    return new ECChunk(buffer);
+  }
+
+  /**
+   * Fill len bytes of dummy data into the buffer at the current position.
+   * @param buffer buffer to fill
+   * @param len number of dummy bytes to put
+   */
+  protected void fillDummyData(ByteBuffer buffer, int len) {
+    byte[] dummy = new byte[len];
+    RAND.nextBytes(dummy);
+    buffer.put(dummy);
+  }
+
+  protected byte[] generateData(int len) {
+    byte[] buffer = new byte[len];
+    for (int i = 0; i < buffer.length; i++) {
+      buffer[i] = (byte) RAND.nextInt(256);
+    }
+    return buffer;
+  }
+
+  protected byte[] generateFixedData(int len) {
+    byte[] buffer = new byte[len];
+    for (int i = 0; i < buffer.length; i++) {
+      buffer[i] = (byte) fixedDataGenerator++;
+      if (fixedDataGenerator == 256) {
+        fixedDataGenerator = 0;
+      }
+    }
+    return buffer;
+  }
+
+  /**
+   * Prepare parity chunks for encoding, one chunk per parity unit.
+   * @return parity chunks
+   */
+  protected ECChunk[] prepareParityChunksForEncoding() {
+    ECChunk[] chunks = new ECChunk[numParityUnits];
+    for (int i = 0; i < chunks.length; i++) {
+      chunks[i] = allocateOutputChunk();
+    }
+
+    return chunks;
+  }
+
+  /**
+   * Prepare output chunks for decoding, one output chunk per erased
+   * chunk.
+   * @return output chunks
+   */
+  protected ECChunk[] prepareOutputChunksForDecoding() {
+    ECChunk[] chunks = new ECChunk[erasedDataIndexes.length +
+        erasedParityIndexes.length];
+
+    for (int i = 0; i < chunks.length; i++) {
+      chunks[i] = allocateOutputChunk();
+    }
+
+    return chunks;
+  }
+
+  /**
+   * Convert an array of chunks to an array of byte arrays.
+   * Note the chunk buffers are not affected.
+   * @param chunks chunks to convert
+   * @return an array of byte arrays
+   */
+  protected byte[][] toArrays(ECChunk[] chunks) {
+    byte[][] bytesArr = new byte[chunks.length][];
+
+    for (int i = 0; i < chunks.length; i++) {
+      if (chunks[i] != null) {
+        bytesArr[i] = chunks[i].toBytesArray();
+      }
+    }
+
+    return bytesArr;
+  }
+
+  /**
+   * Dump all the settings used in the test case if allowDump
+   * is enabled.
+   */
+  protected void dumpSetting() {
+    if (allowDump) {
+      StringBuilder sb = new StringBuilder("Erasure coder test settings:\n");
+      sb.append(" numDataUnits=").append(numDataUnits);
+      sb.append(" numParityUnits=").append(numParityUnits);
+      sb.append(" chunkSize=").append(chunkSize).append("\n");
+
+      sb.append(" erasedDataIndexes=").
+          append(Arrays.toString(erasedDataIndexes));
+      sb.append(" erasedParityIndexes=").
+          append(Arrays.toString(erasedParityIndexes));
+      sb.append(" usingDirectBuffer=").append(usingDirectBuffer);
+      sb.append(" allowVerboseDump=").append(allowDump);
+      sb.append("\n");
+
+      System.out.println(sb.toString());
+    }
+  }
+
+  /**
+   * Dump chunks prefixed with a header if allowDump is enabled.
+   */
+  protected void dumpChunks(String header, ECChunk[] chunks) {
+    if (allowDump) {
+      DumpUtil.dumpChunks(header, chunks);
+    }
+  }
+
+  /**
+   * Corrupt one random chunk so its content is no longer correct.
+   */
+  protected void corruptSomeChunk(ECChunk[] chunks) {
+    int idx = new Random().nextInt(chunks.length);
+    ByteBuffer buffer = chunks[idx].getBuffer();
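+    // Advancing the position by one byte makes the chunk content shorter
+    // than expected, which is enough to make subsequent coding fail.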
+    if (buffer.hasRemaining()) {
+      buffer.position(buffer.position() + 1);
+    }
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/package-info.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/package-info.java
new file mode 100644
index 0000000..c034c0b
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Unit tests for EC related classes.
+ */
+package org.apache.ozone.erasurecode;
+
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java
new file mode 100644
index 0000000..326950d
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/RawErasureCoderBenchmark.java
@@ -0,0 +1,412 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.util.StopWatch;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.text.DecimalFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A benchmark tool to test the performance of different erasure coders.
+ * The tool launches multiple threads to encode/decode a certain amount of
+ * data and measures the total throughput. It focuses only on performance
+ * and doesn't validate correctness of the encoded/decoded results.
+ * Users can specify the data size each thread processes, as well as the
+ * chunk size to use for the coder.
+ * Different coders are supported. Users can specify the coder by a coder
+ * index; the chosen coder is shared among all the threads.
+ */
+public final class RawErasureCoderBenchmark {
+
+  private RawErasureCoderBenchmark() {
+    // prevent instantiation
+  }
+
+  // target size of input data buffer
+  private static final int TARGET_BUFFER_SIZE_MB = 126;
+
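+  // The largest permissible chunk size in KB, chosen so that one full
+  // stripe of NUM_DATA_UNITS chunks still fits in the target buffer.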
+  private static final int MAX_CHUNK_SIZE =
+      TARGET_BUFFER_SIZE_MB / BenchData.NUM_DATA_UNITS * 1024;
+
+  private static final List<RawErasureCoderFactory> CODER_MAKERS =
+      Collections.unmodifiableList(
+          Arrays.asList(new DummyRawErasureCoderFactory(),
+              new RSRawErasureCoderFactory()));
+
+  enum CODER {
+    DUMMY_CODER("Dummy coder"),
+    RS_CODER("Reed-Solomon Java coder");
+
+    private final String name;
+
+    CODER(String name) {
+      this.name = name;
+    }
+
+    @Override
+    public String toString() {
+      return name;
+    }
+  }
+
+  static {
+    Preconditions.checkArgument(CODER_MAKERS.size() == CODER.values().length);
+  }
+
+  private static void printAvailableCoders() {
+    StringBuilder sb = new StringBuilder(
+        "Available coders with coderIndex:\n");
+    for (CODER coder : CODER.values()) {
+      sb.append(coder.ordinal()).append(":").append(coder).append("\n");
+    }
+    System.out.println(sb);
+  }
+
+  private static void usage(String message) {
+    if (message != null) {
+      System.out.println(message);
+    }
+    System.out.println(
+        "Usage: RawErasureCoderBenchmark <encode/decode> <coderIndex> " +
+            "[numThreads] [dataSize-in-MB] [chunkSize-in-KB]");
+    printAvailableCoders();
+    System.exit(1);
+  }
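+
+  // Example invocation (hypothetical class path and numbers): benchmark the
+  // pure Java Reed-Solomon coder (coderIndex 1) with 4 threads, 1024 MB of
+  // data per thread and 64 KB chunks:
+  //
+  //   java -cp <ozone-classpath> \
+  //       org.apache.ozone.erasurecode.rawcoder.RawErasureCoderBenchmark \
+  //       encode 1 4 1024 64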
+
+  public static void main(String[] args) throws Exception {
+    String opType = null;
+    int coderIndex = 0;
+    // default values
+    int dataSizeMB = 10240;
+    int chunkSizeKB = 1024;
+    int numThreads = 1;
+
+    if (args.length > 1) {
+      opType = args[0];
+      if (!"encode".equals(opType) && !"decode".equals(opType)) {
+        usage("Invalid type: should be either 'encode' or 'decode'");
+      }
+
+      try {
+        coderIndex = Integer.parseInt(args[1]);
+        if (coderIndex < 0 || coderIndex >= CODER.values().length) {
+          usage("Invalid coder index, should be [0-" +
+              (CODER.values().length - 1) + "]");
+        }
+      } catch (NumberFormatException e) {
+        usage("Malformed coder index, " + e.getMessage());
+      }
+    } else {
+      usage(null);
+    }
+
+    if (args.length > 2) {
+      try {
+        numThreads = Integer.parseInt(args[2]);
+        if (numThreads <= 0) {
+          usage("Invalid number of threads.");
+        }
+      } catch (NumberFormatException e) {
+        usage("Malformed number of threads, " + e.getMessage());
+      }
+    }
+
+    if (args.length > 3) {
+      try {
+        dataSizeMB = Integer.parseInt(args[3]);
+        if (dataSizeMB <= 0) {
+          usage("Invalid data size.");
+        }
+      } catch (NumberFormatException e) {
+        usage("Malformed data size, " + e.getMessage());
+      }
+    }
+
+    if (args.length > 4) {
+      try {
+        chunkSizeKB = Integer.parseInt(args[4]);
+        if (chunkSizeKB <= 0) {
+          usage("Chunk size should be positive.");
+        }
+        if (chunkSizeKB > MAX_CHUNK_SIZE) {
+          usage("Chunk size should be no larger than " + MAX_CHUNK_SIZE);
+        }
+      } catch (NumberFormatException e) {
+        usage("Malformed chunk size, " + e.getMessage());
+      }
+    }
+
+    performBench(opType, CODER.values()[coderIndex],
+        numThreads, dataSizeMB, chunkSizeKB);
+  }
+
+  /**
+   * Performs the benchmark.
+   *
+   * @param opType      The operation to perform, either "encode" or "decode"
+   * @param coder       The coder to use
+   * @param numThreads  Number of threads to launch concurrently
+   * @param dataSizeMB  Total test data size in MB
+   * @param chunkSizeKB Chunk size in KB
+   */
+  public static void performBench(String opType, CODER coder,
+      int numThreads, int dataSizeMB, int chunkSizeKB) throws Exception {
+    BenchData.configure(dataSizeMB, chunkSizeKB);
+
+    RawErasureEncoder encoder = null;
+    RawErasureDecoder decoder = null;
+    ByteBuffer testData;
+    boolean isEncode = opType.equals("encode");
+
+    if (isEncode) {
+      encoder = getRawEncoder(coder.ordinal());
+      testData = genTestData(encoder.preferDirectBuffer(),
+          BenchData.bufferSizeKB);
+    } else {
+      decoder = getRawDecoder(coder.ordinal());
+      testData = genTestData(decoder.preferDirectBuffer(),
+          BenchData.bufferSizeKB);
+    }
+
+    ExecutorService executor = Executors.newFixedThreadPool(numThreads);
+    List<Future<Long>> futures = new ArrayList<>(numThreads);
+    StopWatch sw = new StopWatch().start();
+    for (int i = 0; i < numThreads; i++) {
+      futures.add(executor.submit(new BenchmarkCallable(isEncode,
+          encoder, decoder, testData.duplicate())));
+    }
+    List<Long> durations = new ArrayList<>(numThreads);
+    try {
+      for (Future<Long> future : futures) {
+        durations.add(future.get());
+      }
+      long duration = sw.now(TimeUnit.MILLISECONDS);
+      double totalDataSize = BenchData.totalDataSizeKB * numThreads / 1024.0;
+      DecimalFormat df = new DecimalFormat("#.##");
+      System.out.println(coder + " " + opType + " " +
+          df.format(totalDataSize) + "MB data, with chunk size " +
+          BenchData.chunkSize / 1024 + "KB");
+      System.out.println("Total time: " + df.format(duration / 1000.0) + " s.");
+      System.out.println("Total throughput: " + df.format(
+          totalDataSize / duration * 1000.0) + " MB/s");
+      printThreadStatistics(durations, df);
+    } catch (Exception e) {
+      System.out.println("Error waiting for thread to finish.");
+      e.printStackTrace();
+      throw e;
+    } finally {
+      executor.shutdown();
+      if (encoder != null) {
+        encoder.release();
+      }
+      if (decoder != null) {
+        decoder.release();
+      }
+    }
+  }
+
+  private static RawErasureEncoder getRawEncoder(int index) throws IOException {
+    RawErasureEncoder encoder =
+        CODER_MAKERS.get(index).createEncoder(BenchData.OPTIONS);
+    final boolean isDirect = encoder.preferDirectBuffer();
+    encoder.encode(
+        getBufferForInit(BenchData.NUM_DATA_UNITS, 1, isDirect),
+        getBufferForInit(BenchData.NUM_PARITY_UNITS, 1, isDirect));
+    return encoder;
+  }
+
+  private static RawErasureDecoder getRawDecoder(int index) throws IOException {
+    RawErasureDecoder decoder =
+        CODER_MAKERS.get(index).createDecoder(BenchData.OPTIONS);
+    final boolean isDirect = decoder.preferDirectBuffer();
+    ByteBuffer[] inputs = getBufferForInit(
+        BenchData.NUM_ALL_UNITS, 1, isDirect);
+    for (int erasedIndex : BenchData.ERASED_INDEXES) {
+      inputs[erasedIndex] = null;
+    }
+    decoder.decode(inputs, BenchData.ERASED_INDEXES,
+        getBufferForInit(BenchData.ERASED_INDEXES.length, 1, isDirect));
+    return decoder;
+  }
+
+  private static ByteBuffer[] getBufferForInit(int numBuf,
+      int bufCap, boolean isDirect) {
+    ByteBuffer[] buffers = new ByteBuffer[numBuf];
+    for (int i = 0; i < buffers.length; i++) {
+      buffers[i] = isDirect ? ByteBuffer.allocateDirect(bufCap) :
+          ByteBuffer.allocate(bufCap);
+    }
+    return buffers;
+  }
+
+  private static void printThreadStatistics(
+      List<Long> durations, DecimalFormat df) {
+    Collections.sort(durations);
+    System.out.println("Threads statistics: ");
+    Double min = durations.get(0) / 1000.0;
+    Double max = durations.get(durations.size() - 1) / 1000.0;
+    Long sum = 0L;
+    for (Long duration : durations) {
+      sum += duration;
+    }
+    Double avg = sum.doubleValue() / durations.size() / 1000.0;
+    Double percentile = durations.get(
+        (int) Math.ceil(durations.size() * 0.9) - 1) / 1000.0;
+    System.out.println(durations.size() + " threads in total.");
+    System.out.println("Min: " + df.format(min) + " s, Max: " +
+        df.format(max) + " s, Avg: " + df.format(avg) +
+        " s, 90th Percentile: " + df.format(percentile) + " s.");
+  }
+
+  private static ByteBuffer genTestData(boolean useDirectBuffer, int sizeKB) {
+    Random random = new Random();
+    int bufferSize = sizeKB * 1024;
+    byte[] tmp = new byte[bufferSize];
+    random.nextBytes(tmp);
+    ByteBuffer data = useDirectBuffer ?
+        ByteBuffer.allocateDirect(bufferSize) :
+        ByteBuffer.allocate(bufferSize);
+    data.put(tmp);
+    data.flip();
+    return data;
+  }
+
+  private static class BenchData {
+    public static final ECReplicationConfig OPTIONS =
+        new ECReplicationConfig(6, 3);
+    public static final int NUM_DATA_UNITS = OPTIONS.getData();
+    public static final int NUM_PARITY_UNITS = OPTIONS.getParity();
+    public static final int NUM_ALL_UNITS = OPTIONS.getData() +
+        OPTIONS.getParity();
+    private static int chunkSize;
+    private static long totalDataSizeKB;
+    private static int bufferSizeKB;
+
+    private static final int[] ERASED_INDEXES = new int[]{6, 7, 8};
+    private final ByteBuffer[] inputs = new ByteBuffer[NUM_DATA_UNITS];
+    private ByteBuffer[] outputs = new ByteBuffer[NUM_PARITY_UNITS];
+    private ByteBuffer[] decodeInputs = new ByteBuffer[NUM_ALL_UNITS];
+
+    public static void configure(int dataSizeMB, int chunkSizeKB) {
+      chunkSize = chunkSizeKB * 1024;
+      // buffer size needs to be a multiple of (numDataUnits * chunkSize)
+      int round = (int) Math.round(
+          TARGET_BUFFER_SIZE_MB * 1024.0 / NUM_DATA_UNITS / chunkSizeKB);
+      Preconditions.checkArgument(round > 0);
+      bufferSizeKB = NUM_DATA_UNITS * chunkSizeKB * round;
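+      // For example, with the default chunkSizeKB = 1024: round =
+      // round(126 * 1024.0 / 6 / 1024) = 21, so bufferSizeKB =
+      // 6 * 1024 * 21 = 129024 KB (126 MB), a multiple of
+      // NUM_DATA_UNITS * chunkSize as required.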
+      System.out.println("Using " + bufferSizeKB / 1024 + "MB buffer.");
+
+      round = (int) Math.round(
+          (dataSizeMB * 1024.0) / bufferSizeKB);
+      if (round == 0) {
+        round = 1;
+      }
+      totalDataSizeKB = round * bufferSizeKB;
+    }
+
+    BenchData(boolean useDirectBuffer) {
+      for (int i = 0; i < outputs.length; i++) {
+        outputs[i] = useDirectBuffer ? ByteBuffer.allocateDirect(chunkSize) :
+            ByteBuffer.allocate(chunkSize);
+      }
+    }
+
+    public void prepareDecInput() {
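+      // Only the data units are copied; the parity slots (ERASED_INDEXES,
+      // i.e. 6..8) stay null to simulate the erased units for decoding.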
+      System.arraycopy(inputs, 0, decodeInputs, 0, NUM_DATA_UNITS);
+    }
+
+    public void encode(RawErasureEncoder encoder) throws IOException {
+      encoder.encode(inputs, outputs);
+    }
+
+    public void decode(RawErasureDecoder decoder) throws IOException {
+      decoder.decode(decodeInputs, ERASED_INDEXES, outputs);
+    }
+  }
+
+  private static class BenchmarkCallable implements Callable<Long> {
+    private final boolean isEncode;
+    private final RawErasureEncoder encoder;
+    private final RawErasureDecoder decoder;
+    private final BenchData benchData;
+    private final ByteBuffer testData;
+
+    BenchmarkCallable(boolean isEncode, RawErasureEncoder encoder,
+        RawErasureDecoder decoder, ByteBuffer testData) {
+      if (isEncode) {
+        Preconditions.checkArgument(encoder != null);
+        this.encoder = encoder;
+        this.decoder = null;
+        benchData = new BenchData(encoder.preferDirectBuffer());
+      } else {
+        Preconditions.checkArgument(decoder != null);
+        this.decoder = decoder;
+        this.encoder = null;
+        benchData = new BenchData(decoder.preferDirectBuffer());
+      }
+      this.isEncode = isEncode;
+      this.testData = testData;
+    }
+
+    @Override
+    public Long call() throws Exception {
+      long rounds = BenchData.totalDataSizeKB / BenchData.bufferSizeKB;
+
+      StopWatch sw = new StopWatch().start();
+      for (long i = 0; i < rounds; i++) {
+        while (testData.remaining() > 0) {
+          for (ByteBuffer output : benchData.outputs) {
+            output.clear();
+          }
+
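+          // Carve the next NUM_DATA_UNITS chunk-sized slices out of the
+          // shared test buffer; each slice becomes one input unit for this
+          // encode/decode round.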
+          for (int j = 0; j < benchData.inputs.length; j++) {
+            benchData.inputs[j] = testData.duplicate();
+            benchData.inputs[j].limit(
+                testData.position() + BenchData.chunkSize);
+            benchData.inputs[j] = benchData.inputs[j].slice();
+            testData.position(testData.position() + BenchData.chunkSize);
+          }
+
+          if (isEncode) {
+            benchData.encode(encoder);
+          } else {
+            benchData.prepareDecInput();
+            benchData.decode(decoder);
+          }
+        }
+        testData.clear();
+      }
+      return sw.now(TimeUnit.MILLISECONDS);
+    }
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestDummyRawCoder.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestDummyRawCoder.java
new file mode 100644
index 0000000..10c72b2
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestDummyRawCoder.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.ozone.erasurecode.ECChunk;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * Test dummy raw coder.
+ */
+public class TestDummyRawCoder extends TestRawCoderBase {
+
+  public TestDummyRawCoder() {
+    super(DummyRawErasureCoderFactory.class, DummyRawErasureCoderFactory.class);
+  }
+
+  @Before
+  public void setup() {
+    setAllowDump(false);
+    setChunkSize(baseChunkSize);
+  }
+
+  @Test
+  public void testCoding6x3ErasingD0D2() {
+    prepare(null, 6, 3, new int[]{0, 2}, new int[0], false);
+    testCodingDoMixed();
+  }
+
+  @Test
+  public void testCoding6x3ErasingD0P0() {
+    prepare(null, 6, 3, new int[]{0}, new int[]{0}, false);
+    testCodingDoMixed();
+  }
+
+  @Override
+  protected void testCoding(boolean usingDirectBuffer) {
+    this.usingDirectBuffer = usingDirectBuffer;
+    prepareCoders(true);
+
+    prepareBufferAllocator(true);
+
+    // Generate data and encode
+    ECChunk[] dataChunks = prepareDataChunksForEncoding();
+    markChunks(dataChunks);
+    ECChunk[] parityChunks = prepareParityChunksForEncoding();
+    try {
+      encode(dataChunks, parityChunks);
+    } catch (IOException e) {
+      Assert.fail("Unexpected IOException: " + e.getMessage());
+    }
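+    // The dummy encoder performs no real coding, so the parity buffers are
+    // expected to remain zero-filled.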
+    compareAndVerify(parityChunks, getEmptyChunks(parityChunks.length));
+
+    // Decode
+    restoreChunksFromMark(dataChunks);
+    backupAndEraseChunks(dataChunks, parityChunks);
+    ECChunk[] inputChunks = prepareInputChunksForDecoding(
+        dataChunks, parityChunks);
+    ensureOnlyLeastRequiredChunks(inputChunks);
+    ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
+    try {
+      decode(inputChunks, getErasedIndexesForDecoding(),
+          recoveredChunks);
+    } catch (IOException e) {
+      Assert.fail("Unexpected IOException: " + e.getMessage());
+    }
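+    // Likewise, the dummy decoder writes nothing, so the recovered chunks
+    // should also stay zero-filled.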
+    compareAndVerify(recoveredChunks, getEmptyChunks(recoveredChunks.length));
+  }
+
+  private ECChunk[] getEmptyChunks(int num) {
+    ECChunk[] chunks = new ECChunk[num];
+    for (int i = 0; i < chunks.length; i++) {
+      chunks[i] = new ECChunk(ByteBuffer.wrap(getZeroChunkBytes()));
+    }
+    return chunks;
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRSRawCoder.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRSRawCoder.java
new file mode 100644
index 0000000..93496b3
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRSRawCoder.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.junit.Before;
+
+/**
+ * Test the new raw Reed-Solomon coder implemented in Java.
+ */
+public class TestRSRawCoder extends TestRSRawCoderBase {
+
+  public TestRSRawCoder() {
+    super(RSRawErasureCoderFactory.class, RSRawErasureCoderFactory.class);
+  }
+
+  @Before
+  public void setup() {
+    setAllowDump(false);
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRSRawCoderBase.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRSRawCoderBase.java
new file mode 100644
index 0000000..e6c795a
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRSRawCoderBase.java
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.junit.Test;
+
+/**
+ * Test base for raw Reed-Solomon coders.
+ */
+public abstract class TestRSRawCoderBase extends TestRawCoderBase {
+
+  public TestRSRawCoderBase(
+      Class<? extends RawErasureCoderFactory> encoderFactoryClass,
+      Class<? extends RawErasureCoderFactory> decoderFactoryClass) {
+    super(encoderFactoryClass, decoderFactoryClass);
+  }
+
+  @Test
+  public void testCoding6x3ErasingAllD() {
+    prepare(null, 6, 3, new int[]{0, 1, 2}, new int[0], true);
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingD0D2() {
+    prepare(null, 6, 3, new int[]{0, 2}, new int[]{});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingD0() {
+    prepare(null, 6, 3, new int[]{0}, new int[0]);
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingD2() {
+    prepare(null, 6, 3, new int[]{2}, new int[]{});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingD0P0() {
+    prepare(null, 6, 3, new int[]{0}, new int[]{0});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingAllP() {
+    prepare(null, 6, 3, new int[0], new int[]{0, 1, 2});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingP0() {
+    prepare(null, 6, 3, new int[0], new int[]{0});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingP2() {
+    prepare(null, 6, 3, new int[0], new int[]{2});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingP0P2() {
+    prepare(null, 6, 3, new int[0], new int[]{0, 2});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingD0P0P1() {
+    prepare(null, 6, 3, new int[]{0}, new int[]{0, 1});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding6x3ErasingD0D2P2() {
+    prepare(null, 6, 3, new int[]{0, 2}, new int[]{2});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCodingNegative6x3ErasingD2D4() {
+    prepare(null, 6, 3, new int[]{2, 4}, new int[0]);
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCodingNegative6x3ErasingTooMany() {
+    prepare(null, 6, 3, new int[]{2, 4}, new int[]{0, 1});
+    testCodingWithErasingTooMany();
+  }
+
+  @Test
+  public void testCoding10x4ErasingD0P0() {
+    prepare(null, 10, 4, new int[] {0}, new int[] {0});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCodingInputBufferPosition() {
+    prepare(null, 6, 3, new int[]{0}, new int[]{0});
+    testInputPosition(false);
+    testInputPosition(true);
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
new file mode 100644
index 0000000..044b60f
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
@@ -0,0 +1,353 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.ozone.erasurecode.ECChunk;
+import org.apache.ozone.erasurecode.TestCoderBase;
+import org.apache.ozone.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+/**
+ * Raw coder test base with utilities.
+ */
+@SuppressWarnings("checkstyle:VisibilityModifier")
+public abstract class TestRawCoderBase extends TestCoderBase {
+  private final Class<? extends RawErasureCoderFactory> encoderFactoryClass;
+  private final Class<? extends RawErasureCoderFactory> decoderFactoryClass;
+  private RawErasureEncoder encoder;
+  private RawErasureDecoder decoder;
+
+  public TestRawCoderBase(
+      Class<? extends RawErasureCoderFactory> encoderFactoryClass,
+      Class<? extends RawErasureCoderFactory> decoderFactoryClass) {
+    this.encoderFactoryClass = encoderFactoryClass;
+    this.decoderFactoryClass = decoderFactoryClass;
+  }
+
+  /**
+   * Run twice to test whether the coders can be reused repeatedly. This
+   * matters because the underlying coding buffers are shared, which may
+   * hide bugs.
+   */
+  protected void testCodingDoMixAndTwice() {
+    testCodingDoMixed();
+    testCodingDoMixed();
+  }
+
+  /**
+   * Run with a mixed buffer usage model to test whether the coders can be
+   * reused across different buffer usage models. This matters because the
+   * underlying coding buffers are shared, which may hide bugs.
+   */
+  protected void testCodingDoMixed() {
+    testCoding(true);
+    testCoding(false);
+  }
+
+  /**
+   * Generate source data, encode, recover and then verify.
+   * RawErasureCoder mainly uses ECChunk to pass input and output data
+   * buffers. It supports two kinds of ByteBuffers: one is array-backed,
+   * the other is a direct ByteBuffer. Use usingDirectBuffer to indicate
+   * which case to test.
+   *
+   * @param usingDirectBuffer whether to test with direct ByteBuffers
+   */
+  protected void testCoding(boolean usingDirectBuffer) {
+    this.usingDirectBuffer = usingDirectBuffer;
+    prepareCoders(true);
+
+    /*
+     * The following runs use three different chunk sizes for inputs and
+     * outputs, to verify that the same encoder/decoder can process data of
+     * variable width.
+     */
+    performTestCoding(baseChunkSize, true, false, false);
+    performTestCoding(baseChunkSize - 17, false, false, false);
+    performTestCoding(baseChunkSize + 16, true, false, false);
+  }
+
+  /**
+   * Similar to above, but perform negative cases using bad input for
+   * encoding.
+   * @param usingDirectBuffer whether to test with direct ByteBuffers
+   */
+  protected void testCodingWithBadInput(boolean usingDirectBuffer) {
+    this.usingDirectBuffer = usingDirectBuffer;
+    prepareCoders(true);
+
+    try {
+      performTestCoding(baseChunkSize, false, true, false);
+      Assert.fail("Encoding test with bad input should fail");
+    } catch (Exception e) {
+      // Expected
+    }
+  }
+
+  /**
+   * Similar to above, but perform negative cases using bad output for
+   * decoding.
+   * @param usingDirectBuffer whether to test with direct ByteBuffers
+   */
+  protected void testCodingWithBadOutput(boolean usingDirectBuffer) {
+    this.usingDirectBuffer = usingDirectBuffer;
+    prepareCoders(true);
+
+    try {
+      performTestCoding(baseChunkSize, false, false, true);
+      Assert.fail("Decoding test with bad output should fail");
+    } catch (Exception e) {
+      // Expected
+    }
+  }
+
+  /**
+   * Test encode/decode after release(); both should raise an IOException.
+   */
+  void testAfterRelease() throws Exception {
+    prepareCoders(true);
+    prepareBufferAllocator(true);
+
+    encoder.release();
+    final ECChunk[] data = prepareDataChunksForEncoding();
+    final ECChunk[] parity = prepareParityChunksForEncoding();
+    LambdaTestUtils.intercept(IOException.class, "closed",
+        () -> encoder.encode(data, parity));
+
+    decoder.release();
+    final ECChunk[] in = prepareInputChunksForDecoding(data, parity);
+    final ECChunk[] out = prepareOutputChunksForDecoding();
+    LambdaTestUtils.intercept(IOException.class, "closed",
+        () -> decoder.decode(in, getErasedIndexesForDecoding(), out));
+  }
+
+  @Test
+  public void testCodingWithErasingTooMany() {
+    try {
+      testCoding(true);
+      Assert.fail("Decoding test erasing too many should fail");
+    } catch (Exception e) {
+      // Expected
+    }
+
+    try {
+      testCoding(false);
+      Assert.fail("Decoding test erasing too many should fail");
+    } catch (Exception e) {
+      // Expected
+    }
+  }
+
+  @Test
+  public void testIdempotentReleases() {
+    prepareCoders(true);
+
+    for (int i = 0; i < 3; i++) {
+      encoder.release();
+      decoder.release();
+    }
+  }
+
+  private void performTestCoding(int chunkSize, boolean usingSlicedBuffer,
+      boolean useBadInput, boolean useBadOutput) {
+    setChunkSize(chunkSize);
+    prepareBufferAllocator(usingSlicedBuffer);
+
+    dumpSetting();
+
+    // Generate data and encode
+    ECChunk[] dataChunks = prepareDataChunksForEncoding();
+    if (useBadInput) {
+      corruptSomeChunk(dataChunks);
+    }
+    dumpChunks("Testing data chunks", dataChunks);
+
+    ECChunk[] parityChunks = prepareParityChunksForEncoding();
+
+    // Backup all the source chunks for later recovering because some coders
+    // may affect the source data.
+    ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
+    markChunks(dataChunks);
+
+    try {
+      encoder.encode(dataChunks, parityChunks);
+    } catch (IOException e) {
+      Assert.fail("Should not get IOException: " + e.getMessage());
+    }
+    dumpChunks("Encoded parity chunks", parityChunks);
+
+    // TODO: wrap in "if (!allowChangeInputs) { ... }" once that option is
+    // adopted from the Hadoop coder tests.
+    restoreChunksFromMark(dataChunks);
+    compareAndVerify(clonedDataChunks, dataChunks);
+
+    // Backup and erase some chunks
+    ECChunk[] backupChunks =
+        backupAndEraseChunks(clonedDataChunks, parityChunks);
+
+    // Decode
+    ECChunk[] inputChunks = prepareInputChunksForDecoding(
+        clonedDataChunks, parityChunks);
+
+    // Remove unnecessary chunks, allowing only the least required
+    // chunks to be read.
+    ensureOnlyLeastRequiredChunks(inputChunks);
+
+    ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
+    if (useBadOutput) {
+      corruptSomeChunk(recoveredChunks);
+    }
+
+    ECChunk[] clonedInputChunks = null;
+    // TODO: wrap in "if (!allowChangeInputs) { ... }" once that option is
+    // adopted from the Hadoop coder tests.
+    markChunks(inputChunks);
+    clonedInputChunks = cloneChunksWithData(inputChunks);
+
+    dumpChunks("Decoding input chunks", inputChunks);
+    try {
+      decoder.decode(inputChunks, getErasedIndexesForDecoding(),
+          recoveredChunks);
+    } catch (IOException e) {
+      Assert.fail("Should not get IOException: " + e.getMessage());
+    }
+    dumpChunks("Decoded/recovered chunks", recoveredChunks);
+
+    // TODO: wrap in "if (!allowChangeInputs) { ... }" once that option is
+    // adopted from the Hadoop coder tests.
+    restoreChunksFromMark(inputChunks);
+    compareAndVerify(clonedInputChunks, inputChunks);
+
+    // Compare
+    compareAndVerify(backupChunks, recoveredChunks);
+  }
+
+  /**
+   * Set to true during setup if you want to dump test settings and coding
+   * data, which is useful in debugging.
+   * @param allowDump whether to dump verbose output
+   */
+  protected void setAllowDump(boolean allowDump) {
+    this.allowDump = allowDump;
+  }
+
+  protected void prepareCoders(boolean recreate) {
+    if (encoder == null || recreate) {
+      encoder = createEncoder();
+    }
+
+    if (decoder == null || recreate) {
+      decoder = createDecoder();
+    }
+  }
+
+  protected void ensureOnlyLeastRequiredChunks(ECChunk[] inputChunks) {
+    int leastRequiredNum = numDataUnits;
+    int erasedNum = erasedDataIndexes.length + erasedParityIndexes.length;
+    int goodNum = inputChunks.length - erasedNum;
+    int redundantNum = goodNum - leastRequiredNum;
+
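+    // For example, with 6 data units, 3 parity units and 2 erasures there
+    // are 7 good chunks in the 9 input slots, while only numDataUnits = 6
+    // are required, so exactly one redundant chunk is nulled out below.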
+    for (int i = 0; i < inputChunks.length && redundantNum > 0; i++) {
+      if (inputChunks[i] != null) {
+        inputChunks[i] = null; // Set to null; this chunk isn't actually needed
+        redundantNum--;
+      }
+    }
+  }
+
+  /**
+   * Create the raw erasure encoder to test.
+   */
+  protected RawErasureEncoder createEncoder() {
+    ECReplicationConfig replicationConfig =
+        new ECReplicationConfig(numDataUnits, numParityUnits);
+    try {
+      RawErasureCoderFactory factory = encoderFactoryClass.newInstance();
+      return factory.createEncoder(replicationConfig);
+    } catch (Exception e) {
+      throw new RuntimeException("Failed to create encoder", e);
+    }
+  }
+
+  /**
+   * Create the raw erasure decoder to test.
+   */
+  protected RawErasureDecoder createDecoder() {
+    ECReplicationConfig replicationConfig =
+        new ECReplicationConfig(numDataUnits, numParityUnits);
+    try {
+      RawErasureCoderFactory factory = decoderFactoryClass.newInstance();
+      return factory.createDecoder(replicationConfig);
+    } catch (Exception e) {
+      throw new RuntimeException("Failed to create decoder", e);
+    }
+  }
+
+  /**
+   * Tests that the input buffer's position is moved to the end after
+   * encode/decode.
+   */
+  protected void testInputPosition(boolean usingDirectBuffer) {
+    this.usingDirectBuffer = usingDirectBuffer;
+    prepareCoders(true);
+    prepareBufferAllocator(false);
+
+    // verify encode
+    ECChunk[] dataChunks = prepareDataChunksForEncoding();
+    ECChunk[] parityChunks = prepareParityChunksForEncoding();
+    ECChunk[] clonedDataChunks = cloneChunksWithData(dataChunks);
+    try {
+      encoder.encode(dataChunks, parityChunks);
+    } catch (IOException e) {
+      Assert.fail("Should not get IOException: " + e.getMessage());
+    }
+    verifyBufferPositionAtEnd(dataChunks);
+
+    // verify decode
+    backupAndEraseChunks(clonedDataChunks, parityChunks);
+    ECChunk[] inputChunks = prepareInputChunksForDecoding(
+        clonedDataChunks, parityChunks);
+    ensureOnlyLeastRequiredChunks(inputChunks);
+    ECChunk[] recoveredChunks = prepareOutputChunksForDecoding();
+    try {
+      decoder.decode(inputChunks, getErasedIndexesForDecoding(),
+          recoveredChunks);
+    } catch (IOException e) {
+      Assert.fail("Should not get IOException: " + e.getMessage());
+    }
+    verifyBufferPositionAtEnd(inputChunks);
+  }
+
+  private void verifyBufferPositionAtEnd(ECChunk[] inputChunks) {
+    for (ECChunk chunk : inputChunks) {
+      if (chunk != null) {
+        Assert.assertEquals(0, chunk.getBuffer().remaining());
+      }
+    }
+  }
+
+  public void encode(ECChunk[] inputs, ECChunk[] outputs) throws IOException {
+    this.encoder.encode(inputs, outputs);
+  }
+
+  public void decode(ECChunk[] inputs, int[] erasedIndexes,
+      ECChunk[] outputs) throws IOException {
+    this.decoder.decode(inputs, erasedIndexes, outputs);
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawErasureCoderBenchmark.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawErasureCoderBenchmark.java
new file mode 100644
index 0000000..cb700c4
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawErasureCoderBenchmark.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.junit.Test;
+
+/**
+ * Tests for the raw erasure coder benchmark tool.
+ */
+public class TestRawErasureCoderBenchmark {
+
+  @Test
+  public void testDummyCoder() throws Exception {
+    // Dummy coder
+    RawErasureCoderBenchmark.performBench("encode",
+        RawErasureCoderBenchmark.CODER.DUMMY_CODER, 2, 100, 1024);
+    RawErasureCoderBenchmark.performBench("decode",
+        RawErasureCoderBenchmark.CODER.DUMMY_CODER, 5, 150, 100);
+  }
+
+  @Test
+  public void testRSCoder() throws Exception {
+    // RS Java coder
+    RawErasureCoderBenchmark.performBench("encode",
+        RawErasureCoderBenchmark.CODER.RS_CODER, 3, 200, 200);
+    RawErasureCoderBenchmark.performBench("decode",
+        RawErasureCoderBenchmark.CODER.RS_CODER, 4, 135, 20);
+  }
+
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestXORRawCoder.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestXORRawCoder.java
new file mode 100644
index 0000000..dcc8099
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestXORRawCoder.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+/**
+ * Test pure Java XOR encoding and decoding.
+ */
+public class TestXORRawCoder extends TestXORRawCoderBase {
+
+  public TestXORRawCoder() {
+    super(XORRawErasureCoderFactory.class, XORRawErasureCoderFactory.class);
+  }
+
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestXORRawCoderBase.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestXORRawCoderBase.java
new file mode 100644
index 0000000..77ed8c7
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestXORRawCoderBase.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
+import org.junit.Test;
+
+/**
+ * Test base for raw XOR coders.
+ */
+public abstract class TestXORRawCoderBase extends TestRawCoderBase {
+
+  public TestXORRawCoderBase(
+      Class<? extends RawErasureCoderFactory> encoderFactoryClass,
+      Class<? extends RawErasureCoderFactory> decoderFactoryClass) {
+    super(encoderFactoryClass, decoderFactoryClass);
+  }
+
+  @Test
+  public void testCoding10x1ErasingD0() {
+    prepare(null, 10, 1, new int[]{0}, new int[0]);
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding10x1ErasingP0() {
+    prepare(null, 10, 1, new int[0], new int[]{0});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding10x1ErasingD5() {
+    prepare(null, 10, 1, new int[]{5}, new int[0]);
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCodingNegative10x1ErasingTooMany() {
+    prepare(null, 10, 1, new int[]{2}, new int[]{0});
+    testCodingWithErasingTooMany();
+  }
+
+  @Test
+  public void testCodingNegative10x1ErasingD5() {
+    prepare(null, 10, 1, new int[]{5}, new int[0]);
+    testCodingWithBadInput(true);
+    testCodingWithBadOutput(false);
+    testCodingWithBadInput(true);
+    testCodingWithBadOutput(false);
+  }
+}
diff --git a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/package-info.java b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/package-info.java
new file mode 100644
index 0000000..b2422af
--- /dev/null
+++ b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/package-info.java
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Unit tests for raw coders.
+ */
+package org.apache.ozone.erasurecode.rawcoder;
+
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 6384884..a59c07d 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -45,6 +45,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>docs</module>
     <module>config</module>
     <module>test-utils</module>
+    <module>erasurecode</module>
   </modules>
 
   <repositories>
diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml
index 56ebe56..a53cbca 100644
--- a/hadoop-ozone/client/pom.xml
+++ b/hadoop-ozone/client/pom.xml
@@ -39,14 +39,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>ozone-common</artifactId>
     </dependency>
     <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>hdds-erasurecode</artifactId>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>test</scope>
     </dependency>
-      <dependency>
-          <groupId>org.apache.hadoop</groupId>
-          <artifactId>hadoop-hdfs-client</artifactId>
-      </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index ec949fd..a48b2ed 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -64,6 +64,7 @@ share/ozone/lib/hdds-common.jar
 share/ozone/lib/hdds-config.jar
 share/ozone/lib/hdds-container-service.jar
 share/ozone/lib/hdds-docs.jar
+share/ozone/lib/hdds-erasurecode.jar
 share/ozone/lib/hdds-hadoop-dependency-client.jar
 share/ozone/lib/hdds-hadoop-dependency-server.jar
 share/ozone/lib/hdds-interface-admin.jar
diff --git a/hadoop-ozone/dist/src/main/org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory b/hadoop-ozone/dist/src/main/org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory
new file mode 100644
index 0000000..23b1a14
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory
@@ -0,0 +1,18 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory
+org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory
+org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory
+org.apache.hadoop.io.erasurecode.rawcoder.RSLegacyRawErasureCoderFactory
+org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 8116839..a29673f 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -134,6 +134,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.ozone</groupId>
+        <artifactId>hdds-erasurecode</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.ozone</groupId>
         <artifactId>hdds-interface-client</artifactId>
         <version>${hdds.version}</version>
       </dependency>

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org