Posted to common-commits@hadoop.apache.org by cd...@apache.org on 2009/07/11 03:22:15 UTC

svn commit: r793163 [1/4] - in /hadoop/common/branches/branch-0.20: ./ src/core/org/apache/hadoop/io/file/ src/core/org/apache/hadoop/io/file/tfile/ src/test/ src/test/org/apache/hadoop/io/file/ src/test/org/apache/hadoop/io/file/tfile/

Author: cdouglas
Date: Sat Jul 11 01:22:14 2009
New Revision: 793163

URL: http://svn.apache.org/viewvc?rev=793163&view=rev
Log:
HADOOP-3315. Add a new, binary file format, TFile. Contributed by Hong Tang

Added:
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BCFile.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedByteArrayOutputStream.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/ByteArray.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Chunk.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/CompareUtils.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Compression.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockAlreadyExists.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockDoesNotExist.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/RawComparable.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/TFile.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/TFileDumper.java
    hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Utils.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/KVGenerator.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/KeySampler.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/NanoTimer.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/RandomDistribution.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFile.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileByteArrays.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileComparators.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileJClassComparatorByteArrays.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileLzoCodecsByteArrays.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileLzoCodecsStreams.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsByteArrays.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsJClassComparatorByteArrays.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileNoneCodecsStreams.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileSeek.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileSplit.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileStreams.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestTFileUnsortedByteArrays.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/TestVLong.java
    hadoop/common/branches/branch-0.20/src/test/org/apache/hadoop/io/file/tfile/Timer.java
Modified:
    hadoop/common/branches/branch-0.20/CHANGES.txt
    hadoop/common/branches/branch-0.20/build.xml
    hadoop/common/branches/branch-0.20/src/test/findbugsExcludeFile.xml

Modified: hadoop/common/branches/branch-0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/CHANGES.txt?rev=793163&r1=793162&r2=793163&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.20/CHANGES.txt Sat Jul 11 01:22:14 2009
@@ -15,6 +15,8 @@
     HADOOP-6080. Introduce -skipTrash option to rm and rmr.
     (Jakob Homan via shv)
 
+    HADOOP-3315. Add a new, binary file format, TFile. (Hong Tang via cdouglas)
+
   IMPROVEMENTS
 
     HADOOP-5711. Change Namenode file close log to info. (szetszwo)

Modified: hadoop/common/branches/branch-0.20/build.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/build.xml?rev=793163&r1=793162&r2=793163&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20/build.xml (original)
+++ hadoop/common/branches/branch-0.20/build.xml Sat Jul 11 01:22:14 2009
@@ -728,6 +728,10 @@
       <sysproperty key="java.library.path"
        value="${build.native}/lib:${lib.dir}/native/${build.platform}"/>
       <sysproperty key="install.c++.examples" value="${install.c++.examples}"/>
+      <!-- set io.compression.codec.lzo.class in the child jvm only if it is set -->
+      <syspropertyset dynamic="no">
+         <propertyref name="io.compression.codec.lzo.class"/>
+      </syspropertyset>
       <!-- set compile.c++ in the child jvm only if it is set -->
       <syspropertyset dynamic="no">
          <propertyref name="compile.c++"/>

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BCFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BCFile.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BCFile.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BCFile.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,979 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.io.file.tfile;
+
+import java.io.Closeable;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.file.tfile.CompareUtils.Scalar;
+import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarComparator;
+import org.apache.hadoop.io.file.tfile.CompareUtils.ScalarLong;
+import org.apache.hadoop.io.file.tfile.Compression.Algorithm;
+import org.apache.hadoop.io.file.tfile.Utils.Version;
+
+/**
+ * Block Compressed file, the underlying physical storage layer for TFile.
+ * BCFile provides basic block-level compression for the data blocks and meta
+ * blocks. It is separated from TFile as it may be used for other
+ * block-compressed file implementations.
+ */
+final class BCFile {
+  // the current version of the BCFile impl; increment it (major or minor)
+  // whenever enough changes have been made
+  static final Version API_VERSION = new Version((short) 1, (short) 0);
+  static final Log LOG = LogFactory.getLog(BCFile.class);
+
+  /**
+   * Prevent the instantiation of BCFile objects.
+   */
+  private BCFile() {
+    // nothing
+  }
+
+  /**
+   * BCFile writer, the entry point for creating a new BCFile.
+   */
+  static public class Writer implements Closeable {
+    private final FSDataOutputStream out;
+    private final Configuration conf;
+    // the single meta block containing index of compressed data blocks
+    final DataIndex dataIndex;
+    // index for meta blocks
+    final MetaIndex metaIndex;
+    boolean blkInProgress = false;
+    private boolean metaBlkSeen = false;
+    private boolean closed = false;
+    long errorCount = 0;
+    // reusable buffers.
+    private BytesWritable fsOutputBuffer;
+
+    /**
+     * Call-back interface to register a block after a block is closed.
+     */
+    private static interface BlockRegister {
+      /**
+       * Register a block that is fully closed.
+       * 
+       * @param raw
+       *          The size of block in terms of uncompressed bytes.
+       * @param offsetStart
+       *          The start offset of the block.
+       * @param offsetEnd
+       *          One byte after the end of the block. Compressed block size is
+       *          offsetEnd - offsetStart.
+       */
+      public void register(long raw, long offsetStart, long offsetEnd);
+    }
+
+    /**
+     * Intermediate class that maintains the state of a Writable Compression
+     * Block.
+     */
+    private static final class WBlockState {
+      private final Algorithm compressAlgo;
+      private Compressor compressor; // !null only if using native
+      // Hadoop compression
+      private final FSDataOutputStream fsOut;
+      private final long posStart;
+      private final SimpleBufferedOutputStream fsBufferedOutput;
+      private OutputStream out;
+
+      /**
+       * @param compressionAlgo
+       *          The compression algorithm to be used for compression.
+       * @throws IOException
+       */
+      public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
+          BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
+        this.compressAlgo = compressionAlgo;
+        this.fsOut = fsOut;
+        this.posStart = fsOut.getPos();
+
+        fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));
+
+        this.fsBufferedOutput =
+            new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
+        this.compressor = compressAlgo.getCompressor();
+
+        try {
+          this.out =
+              compressionAlgo.createCompressionStream(fsBufferedOutput,
+                  compressor, 0);
+        } catch (IOException e) {
+          compressAlgo.returnCompressor(compressor);
+          throw e;
+        }
+      }
+
+      /**
+       * Get the output stream for BlockAppender's consumption.
+       * 
+       * @return the output stream suitable for writing block data.
+       */
+      OutputStream getOutputStream() {
+        return out;
+      }
+
+      /**
+       * Get the current position in file.
+       * 
+       * @return The current byte offset in underlying file.
+       * @throws IOException
+       */
+      long getCurrentPos() throws IOException {
+        return fsOut.getPos() + fsBufferedOutput.size();
+      }
+
+      long getStartPos() {
+        return posStart;
+      }
+
+      /**
+       * Current size of compressed data.
+       * 
+       * @return the current size of the compressed data, in bytes.
+       * @throws IOException
+       */
+      long getCompressedSize() throws IOException {
+        long ret = getCurrentPos() - posStart;
+        return ret;
+      }
+
+      /**
+       * Finishing up the current block.
+       */
+      public void finish() throws IOException {
+        try {
+          if (out != null) {
+            out.flush();
+            out = null;
+          }
+        } finally {
+          compressAlgo.returnCompressor(compressor);
+          compressor = null;
+        }
+      }
+    }
+
+    /**
+     * Access point to stuff data into a block.
+     * 
+     * TODO: Change DataOutputStream to something else that tracks the size as
+     * long instead of int. Currently, we will wrap around if the row block size
+     * is greater than 4GB.
+     */
+    public class BlockAppender extends DataOutputStream {
+      private final BlockRegister blockRegister;
+      private final WBlockState wBlkState;
+      @SuppressWarnings("hiding")
+      private boolean closed = false;
+
+      /**
+       * Constructor
+       * 
+       * @param register
+       *          the block register, which is called when the block is closed.
+       * @param wbs
+       *          The writable compression block state.
+       */
+      BlockAppender(BlockRegister register, WBlockState wbs) {
+        super(wbs.getOutputStream());
+        this.blockRegister = register;
+        this.wBlkState = wbs;
+      }
+
+      /**
+       * Get the raw size of the block.
+       * 
+       * @return the number of uncompressed bytes written through the
+       *         BlockAppender so far.
+       * @throws IOException
+       */
+      public long getRawSize() throws IOException {
+        /**
+         * Expecting the size() of a block not exceeding 4GB. Assuming the
+         * size() will wrap to negative integer if it exceeds 2GB.
+         */
+        return size() & 0x00000000ffffffffL;
+      }
+
+      /**
+       * Get the compressed size of the block in progress.
+       * 
+       * @return the number of compressed bytes written to the underlying FS
+       *         file. The size may be smaller than what is actually needed to
+       *         compress all the data written, due to internal buffering inside
+       *         the compressor.
+       * @throws IOException
+       */
+      public long getCompressedSize() throws IOException {
+        return wBlkState.getCompressedSize();
+      }
+
+      @Override
+      public void flush() {
+        // The downstream is a special kind of stream that finishes a
+        // compression block upon flush, so flush() is disabled here.
+      }
+
+      /**
+       * Signals the end of writing to the block. The block register will be
+       * called for registering the finished block.
+       */
+      @Override
+      public void close() throws IOException {
+        if (closed == true) {
+          return;
+        }
+        try {
+          ++errorCount;
+          wBlkState.finish();
+          blockRegister.register(getRawSize(), wBlkState.getStartPos(),
+              wBlkState.getCurrentPos());
+          --errorCount;
+        } finally {
+          closed = true;
+          blkInProgress = false;
+        }
+      }
+    }
+
+    /**
+     * Constructor
+     * 
+     * @param fout
+     *          FS output stream.
+     * @param compressionName
+     *          Name of the compression algorithm, which will be used for all
+     *          data blocks.
+     * @throws IOException
+     * @see Compression#getSupportedAlgorithms
+     */
+    public Writer(FSDataOutputStream fout, String compressionName,
+        Configuration conf) throws IOException {
+      if (fout.getPos() != 0) {
+        throw new IOException("Output file not at zero offset.");
+      }
+
+      this.out = fout;
+      this.conf = conf;
+      dataIndex = new DataIndex(compressionName);
+      metaIndex = new MetaIndex();
+      fsOutputBuffer = new BytesWritable();
+      Magic.write(fout);
+    }
+
+    /**
+     * Close the BCFile Writer. Attempting to use the Writer after calling
+     * <code>close</code> is not allowed and may lead to undetermined results.
+     */
+    public void close() throws IOException {
+      if (closed == true) {
+        return;
+      }
+
+      try {
+        if (errorCount == 0) {
+          if (blkInProgress == true) {
+            throw new IllegalStateException(
+                "Close() called with active block appender.");
+          }
+
+          // add metaBCFileIndex to metaIndex as the last meta block
+          BlockAppender appender =
+              prepareMetaBlock(DataIndex.BLOCK_NAME,
+                  getDefaultCompressionAlgorithm());
+          try {
+            dataIndex.write(appender);
+          } finally {
+            appender.close();
+          }
+
+          long offsetIndexMeta = out.getPos();
+          metaIndex.write(out);
+
+          // Meta Index and the trailing section are written out directly.
+          out.writeLong(offsetIndexMeta);
+
+          API_VERSION.write(out);
+          Magic.write(out);
+          out.flush();
+        }
+      } finally {
+        closed = true;
+      }
+    }
+
+    private Algorithm getDefaultCompressionAlgorithm() {
+      return dataIndex.getDefaultCompressionAlgorithm();
+    }
+
+    private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
+        throws IOException, MetaBlockAlreadyExists {
+      if (blkInProgress == true) {
+        throw new IllegalStateException(
+            "Cannot create Meta Block until previous block is closed.");
+      }
+
+      if (metaIndex.getMetaByName(name) != null) {
+        throw new MetaBlockAlreadyExists("name=" + name);
+      }
+
+      MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
+      WBlockState wbs =
+          new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
+      BlockAppender ba = new BlockAppender(mbr, wbs);
+      blkInProgress = true;
+      metaBlkSeen = true;
+      return ba;
+    }
+
+    /**
+     * Create a Meta Block and obtain an output stream for adding data into the
+     * block. There can only be one BlockAppender stream active at any time.
+     * Regular Blocks may not be created after the first Meta Block. The caller
+     * must call BlockAppender.close() to conclude the block creation.
+     * 
+     * @param name
+     *          The name of the Meta Block. The name must not conflict with
+     *          existing Meta Blocks.
+     * @param compressionName
+     *          The name of the compression algorithm to be used.
+     * @return The BlockAppender stream
+     * @throws IOException
+     * @throws MetaBlockAlreadyExists
+     *           If the meta block with the name already exists.
+     */
+    public BlockAppender prepareMetaBlock(String name, String compressionName)
+        throws IOException, MetaBlockAlreadyExists {
+      return prepareMetaBlock(name, Compression
+          .getCompressionAlgorithmByName(compressionName));
+    }
+
+    /**
+     * Create a Meta Block and obtain an output stream for adding data into the
+     * block. The Meta Block will be compressed with the same compression
+     * algorithm as data blocks. There can only be one BlockAppender stream
+     * active at any time. Regular Blocks may not be created after the first
+     * Meta Block. The caller must call BlockAppender.close() to conclude the
+     * block creation.
+     * 
+     * @param name
+     *          The name of the Meta Block. The name must not conflict with
+     *          existing Meta Blocks.
+     * @return The BlockAppender stream
+     * @throws MetaBlockAlreadyExists
+     *           If the meta block with the name already exists.
+     * @throws IOException
+     */
+    public BlockAppender prepareMetaBlock(String name) throws IOException,
+        MetaBlockAlreadyExists {
+      return prepareMetaBlock(name, getDefaultCompressionAlgorithm());
+    }
+
+    /**
+     * Create a Data Block and obtain an output stream for adding data into the
+     * block. There can only be one BlockAppender stream active at any time.
+     * Data Blocks may not be created after the first Meta Block. The caller
+     * must call BlockAppender.close() to conclude the block creation.
+     * 
+     * @return The BlockAppender stream
+     * @throws IOException
+     */
+    public BlockAppender prepareDataBlock() throws IOException {
+      if (blkInProgress == true) {
+        throw new IllegalStateException(
+            "Cannot create Data Block until previous block is closed.");
+      }
+
+      if (metaBlkSeen == true) {
+        throw new IllegalStateException(
+            "Cannot create Data Block after Meta Blocks.");
+      }
+
+      DataBlockRegister dbr = new DataBlockRegister();
+
+      WBlockState wbs =
+          new WBlockState(getDefaultCompressionAlgorithm(), out,
+              fsOutputBuffer, conf);
+      BlockAppender ba = new BlockAppender(dbr, wbs);
+      blkInProgress = true;
+      return ba;
+    }
+
+    /**
+     * Callback to make sure a meta block is added to the internal list when its
+     * stream is closed.
+     */
+    private class MetaBlockRegister implements BlockRegister {
+      private final String name;
+      private final Algorithm compressAlgo;
+
+      MetaBlockRegister(String name, Algorithm compressAlgo) {
+        this.name = name;
+        this.compressAlgo = compressAlgo;
+      }
+
+      public void register(long raw, long begin, long end) {
+        metaIndex.addEntry(new MetaIndexEntry(name, compressAlgo,
+            new BlockRegion(begin, end - begin, raw)));
+      }
+    }
+
+    /**
+     * Callback to make sure a data block is added to the internal list when
+     * it's being closed.
+     * 
+     */
+    private class DataBlockRegister implements BlockRegister {
+      DataBlockRegister() {
+        // do nothing
+      }
+
+      public void register(long raw, long begin, long end) {
+        dataIndex.addBlockRegion(new BlockRegion(begin, end - begin, raw));
+      }
+    }
+  }
+
+  /**
+   * BCFile Reader, interface to read the file's data and meta blocks.
+   */
+  static public class Reader implements Closeable {
+    private final FSDataInputStream in;
+    private final Configuration conf;
+    final DataIndex dataIndex;
+    // Index for meta blocks
+    final MetaIndex metaIndex;
+    final Version version;
+
+    /**
+     * Intermediate class that maintains the state of a Readable Compression
+     * Block.
+     */
+    static private final class RBlockState {
+      private final Algorithm compressAlgo;
+      private Decompressor decompressor;
+      private final BlockRegion region;
+      private final InputStream in;
+
+      public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
+          BlockRegion region, Configuration conf) throws IOException {
+        this.compressAlgo = compressionAlgo;
+        this.region = region;
+        this.decompressor = compressionAlgo.getDecompressor();
+
+        try {
+          this.in =
+              compressAlgo
+                  .createDecompressionStream(new BoundedRangeFileInputStream(
+                      fsin, this.region.getOffset(), this.region
+                          .getCompressedSize()), decompressor, TFile
+                      .getFSInputBufferSize(conf));
+        } catch (IOException e) {
+          compressAlgo.returnDecompressor(decompressor);
+          throw e;
+        }
+      }
+
+      /**
+       * Get the input stream for the BlockReader's consumption.
+       * 
+       * @return the input stream suitable for reading block data.
+       */
+      public InputStream getInputStream() {
+        return in;
+      }
+
+      public String getCompressionName() {
+        return compressAlgo.getName();
+      }
+
+      public BlockRegion getBlockRegion() {
+        return region;
+      }
+
+      public void finish() throws IOException {
+        try {
+          in.close();
+        } finally {
+          compressAlgo.returnDecompressor(decompressor);
+          decompressor = null;
+        }
+      }
+    }
+
+    /**
+     * Access point to read a block.
+     */
+    public static class BlockReader extends DataInputStream {
+      private final RBlockState rBlkState;
+      private boolean closed = false;
+
+      BlockReader(RBlockState rbs) {
+        super(rbs.getInputStream());
+        rBlkState = rbs;
+      }
+
+      /**
+       * Finishing reading the block. Release all resources.
+       */
+      @Override
+      public void close() throws IOException {
+        if (closed == true) {
+          return;
+        }
+        try {
+          // Do not set rBlkState to null. People may access stats after calling
+          // close().
+          rBlkState.finish();
+        } finally {
+          closed = true;
+        }
+      }
+
+      /**
+       * Get the name of the compression algorithm used to compress the block.
+       * 
+       * @return name of the compression algorithm.
+       */
+      public String getCompressionName() {
+        return rBlkState.getCompressionName();
+      }
+
+      /**
+       * Get the uncompressed size of the block.
+       * 
+       * @return uncompressed size of the block.
+       */
+      public long getRawSize() {
+        return rBlkState.getBlockRegion().getRawSize();
+      }
+
+      /**
+       * Get the compressed size of the block.
+       * 
+       * @return compressed size of the block.
+       */
+      public long getCompressedSize() {
+        return rBlkState.getBlockRegion().getCompressedSize();
+      }
+
+      /**
+       * Get the starting position of the block in the file.
+       * 
+       * @return the starting position of the block in the file.
+       */
+      public long getStartPos() {
+        return rBlkState.getBlockRegion().getOffset();
+      }
+    }
+
+    /**
+     * Constructor
+     * 
+     * @param fin
+     *          FS input stream.
+     * @param fileLength
+     *          Length of the corresponding file
+     * @throws IOException
+     */
+    public Reader(FSDataInputStream fin, long fileLength, Configuration conf)
+        throws IOException {
+      this.in = fin;
+      this.conf = conf;
+
+      // move the cursor to the beginning of the tail, containing: offset to the
+      // meta block index, version and magic
+      fin.seek(fileLength - Magic.size() - Version.size() - Long.SIZE
+          / Byte.SIZE);
+      long offsetIndexMeta = fin.readLong();
+      version = new Version(fin);
+      Magic.readAndVerify(fin);
+
+      if (!version.compatibleWith(BCFile.API_VERSION)) {
+        throw new RuntimeException("Incompatible BCFile version.");
+      }
+
+      // read meta index
+      fin.seek(offsetIndexMeta);
+      metaIndex = new MetaIndex(fin);
+
+      // read data:BCFile.index, the data block index
+      BlockReader blockR = getMetaBlock(DataIndex.BLOCK_NAME);
+      try {
+        dataIndex = new DataIndex(blockR);
+      } finally {
+        blockR.close();
+      }
+    }
+
+    /**
+     * Get the name of the default compression algorithm.
+     * 
+     * @return the name of the default compression algorithm.
+     */
+    public String getDefaultCompressionName() {
+      return dataIndex.getDefaultCompressionAlgorithm().getName();
+    }
+
+    /**
+     * Get version of BCFile file being read.
+     * 
+     * @return version of BCFile file being read.
+     */
+    public Version getBCFileVersion() {
+      return version;
+    }
+
+    /**
+     * Get version of BCFile API.
+     * 
+     * @return version of BCFile API.
+     */
+    public Version getAPIVersion() {
+      return API_VERSION;
+    }
+
+    /**
+     * Finishing reading the BCFile. Release all resources.
+     */
+    public void close() {
+      // nothing to be done now
+    }
+
+    /**
+     * Get the number of data blocks.
+     * 
+     * @return the number of data blocks.
+     */
+    public int getBlockCount() {
+      return dataIndex.getBlockRegionList().size();
+    }
+
+    /**
+     * Stream access to a Meta Block.
+     * 
+     * @param name
+     *          meta block name
+     * @return BlockReader input stream for reading the meta block.
+     * @throws IOException
+     * @throws MetaBlockDoesNotExist
+     *           The Meta Block with the given name does not exist.
+     */
+    public BlockReader getMetaBlock(String name) throws IOException,
+        MetaBlockDoesNotExist {
+      MetaIndexEntry imeBCIndex = metaIndex.getMetaByName(name);
+      if (imeBCIndex == null) {
+        throw new MetaBlockDoesNotExist("name=" + name);
+      }
+
+      BlockRegion region = imeBCIndex.getRegion();
+      return createReader(imeBCIndex.getCompressionAlgorithm(), region);
+    }
+
+    /**
+     * Stream access to a Data Block.
+     * 
+     * @param blockIndex
+     *          0-based data block index.
+     * @return BlockReader input stream for reading the data block.
+     * @throws IOException
+     */
+    public BlockReader getDataBlock(int blockIndex) throws IOException {
+      if (blockIndex < 0 || blockIndex >= getBlockCount()) {
+        throw new IndexOutOfBoundsException(String.format(
+            "blockIndex=%d, numBlocks=%d", blockIndex, getBlockCount()));
+      }
+
+      BlockRegion region = dataIndex.getBlockRegionList().get(blockIndex);
+      return createReader(dataIndex.getDefaultCompressionAlgorithm(), region);
+    }
+
+    private BlockReader createReader(Algorithm compressAlgo, BlockRegion region)
+        throws IOException {
+      RBlockState rbs = new RBlockState(compressAlgo, in, region, conf);
+      return new BlockReader(rbs);
+    }
+
+    /**
+     * Find the smallest Block index whose starting offset is greater than or
+     * equal to the specified offset.
+     * 
+     * @param offset
+     *          User-specified offset.
+     * @return the index to the data Block if such block exists; or -1
+     *         otherwise.
+     */
+    public int getBlockIndexNear(long offset) {
+      ArrayList<BlockRegion> list = dataIndex.getBlockRegionList();
+      int idx =
+          Utils
+              .lowerBound(list, new ScalarLong(offset), new ScalarComparator());
+
+      if (idx == list.size()) {
+        return -1;
+      }
+
+      return idx;
+    }
+  }
+
+  /**
+   * Index for all Meta blocks.
+   */
+  static class MetaIndex {
+    // use a tree map, for getting a meta block entry by name
+    final Map<String, MetaIndexEntry> index;
+
+    // for write
+    public MetaIndex() {
+      index = new TreeMap<String, MetaIndexEntry>();
+    }
+
+    // for read, construct the map from the file
+    public MetaIndex(DataInput in) throws IOException {
+      int count = Utils.readVInt(in);
+      index = new TreeMap<String, MetaIndexEntry>();
+
+      for (int nx = 0; nx < count; nx++) {
+        MetaIndexEntry indexEntry = new MetaIndexEntry(in);
+        index.put(indexEntry.getMetaName(), indexEntry);
+      }
+    }
+
+    public void addEntry(MetaIndexEntry indexEntry) {
+      index.put(indexEntry.getMetaName(), indexEntry);
+    }
+
+    public MetaIndexEntry getMetaByName(String name) {
+      return index.get(name);
+    }
+
+    public void write(DataOutput out) throws IOException {
+      Utils.writeVInt(out, index.size());
+
+      for (MetaIndexEntry indexEntry : index.values()) {
+        indexEntry.write(out);
+      }
+    }
+  }
+
+  /**
+   * An entry describes a meta block in the MetaIndex.
+   */
+  static final class MetaIndexEntry {
+    private final String metaName;
+    private final Algorithm compressionAlgorithm;
+    private final static String defaultPrefix = "data:";
+
+    private final BlockRegion region;
+
+    public MetaIndexEntry(DataInput in) throws IOException {
+      String fullMetaName = Utils.readString(in);
+      if (fullMetaName.startsWith(defaultPrefix)) {
+        metaName =
+            fullMetaName.substring(defaultPrefix.length(), fullMetaName
+                .length());
+      } else {
+        throw new IOException("Corrupted Meta region Index");
+      }
+
+      compressionAlgorithm =
+          Compression.getCompressionAlgorithmByName(Utils.readString(in));
+      region = new BlockRegion(in);
+    }
+
+    public MetaIndexEntry(String metaName, Algorithm compressionAlgorithm,
+        BlockRegion region) {
+      this.metaName = metaName;
+      this.compressionAlgorithm = compressionAlgorithm;
+      this.region = region;
+    }
+
+    public String getMetaName() {
+      return metaName;
+    }
+
+    public Algorithm getCompressionAlgorithm() {
+      return compressionAlgorithm;
+    }
+
+    public BlockRegion getRegion() {
+      return region;
+    }
+
+    public void write(DataOutput out) throws IOException {
+      Utils.writeString(out, defaultPrefix + metaName);
+      Utils.writeString(out, compressionAlgorithm.getName());
+
+      region.write(out);
+    }
+  }
+
+  /**
+   * Index of all compressed data blocks.
+   */
+  static class DataIndex {
+    final static String BLOCK_NAME = "BCFile.index";
+
+    private final Algorithm defaultCompressionAlgorithm;
+
+    // for data blocks, each entry specifies a block's offset, compressed size
+    // and raw size
+    private final ArrayList<BlockRegion> listRegions;
+
+    // for read, deserialized from a file
+    public DataIndex(DataInput in) throws IOException {
+      defaultCompressionAlgorithm =
+          Compression.getCompressionAlgorithmByName(Utils.readString(in));
+
+      int n = Utils.readVInt(in);
+      listRegions = new ArrayList<BlockRegion>(n);
+
+      for (int i = 0; i < n; i++) {
+        BlockRegion region = new BlockRegion(in);
+        listRegions.add(region);
+      }
+    }
+
+    // for write
+    public DataIndex(String defaultCompressionAlgorithmName) {
+      this.defaultCompressionAlgorithm =
+          Compression
+              .getCompressionAlgorithmByName(defaultCompressionAlgorithmName);
+      listRegions = new ArrayList<BlockRegion>();
+    }
+
+    public Algorithm getDefaultCompressionAlgorithm() {
+      return defaultCompressionAlgorithm;
+    }
+
+    public ArrayList<BlockRegion> getBlockRegionList() {
+      return listRegions;
+    }
+
+    public void addBlockRegion(BlockRegion region) {
+      listRegions.add(region);
+    }
+
+    public void write(DataOutput out) throws IOException {
+      Utils.writeString(out, defaultCompressionAlgorithm.getName());
+
+      Utils.writeVInt(out, listRegions.size());
+
+      for (BlockRegion region : listRegions) {
+        region.write(out);
+      }
+    }
+  }
+
+  /**
+   * Magic number uniquely identifying a BCFile in the header/footer.
+   */
+  static final class Magic {
+    private final static byte[] AB_MAGIC_BCFILE =
+        {
+            // ... total of 16 bytes
+            (byte) 0xd1, (byte) 0x11, (byte) 0xd3, (byte) 0x68, (byte) 0x91,
+            (byte) 0xb5, (byte) 0xd7, (byte) 0xb6, (byte) 0x39, (byte) 0xdf,
+            (byte) 0x41, (byte) 0x40, (byte) 0x92, (byte) 0xba, (byte) 0xe1,
+            (byte) 0x50 };
+
+    public static void readAndVerify(DataInput in) throws IOException {
+      byte[] abMagic = new byte[size()];
+      in.readFully(abMagic);
+
+      // check against AB_MAGIC_BCFILE, if not matching, throw an
+      // Exception
+      if (!Arrays.equals(abMagic, AB_MAGIC_BCFILE)) {
+        throw new IOException("Not a valid BCFile.");
+      }
+    }
+
+    public static void write(DataOutput out) throws IOException {
+      out.write(AB_MAGIC_BCFILE);
+    }
+
+    public static int size() {
+      return AB_MAGIC_BCFILE.length;
+    }
+  }
+
+  /**
+   * Block region.
+   */
+  static final class BlockRegion implements Scalar {
+    private final long offset;
+    private final long compressedSize;
+    private final long rawSize;
+
+    public BlockRegion(DataInput in) throws IOException {
+      offset = Utils.readVLong(in);
+      compressedSize = Utils.readVLong(in);
+      rawSize = Utils.readVLong(in);
+    }
+
+    public BlockRegion(long offset, long compressedSize, long rawSize) {
+      this.offset = offset;
+      this.compressedSize = compressedSize;
+      this.rawSize = rawSize;
+    }
+
+    public void write(DataOutput out) throws IOException {
+      Utils.writeVLong(out, offset);
+      Utils.writeVLong(out, compressedSize);
+      Utils.writeVLong(out, rawSize);
+    }
+
+    public long getOffset() {
+      return offset;
+    }
+
+    public long getCompressedSize() {
+      return compressedSize;
+    }
+
+    public long getRawSize() {
+      return rawSize;
+    }
+
+    @Override
+    public long magnitude() {
+      return offset;
+    }
+  }
+}
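
For context, a minimal usage sketch of the Writer/Reader API added above (not part
of the patch). It assumes code living in the org.apache.hadoop.io.file.tfile
package, since BCFile is package-private, that "gz" is a supported compression
name, and that the path is just a placeholder.

    package org.apache.hadoop.io.file.tfile;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BCFileExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.bcfile");   // hypothetical location

        // Write two data blocks, then one meta block (data blocks must come first).
        FSDataOutputStream fout = fs.create(path);
        BCFile.Writer writer = new BCFile.Writer(fout, "gz", conf);
        for (int i = 0; i < 2; i++) {
          BCFile.Writer.BlockAppender data = writer.prepareDataBlock();
          try {
            data.writeUTF("data block " + i);
          } finally {
            data.close();     // registers the block region in the data index
          }
        }
        BCFile.Writer.BlockAppender meta = writer.prepareMetaBlock("example.meta");
        try {
          meta.writeUTF("example meta data");
        } finally {
          meta.close();
        }
        writer.close();       // writes the data index, meta index and trailer
        fout.close();

        // Read the data blocks back.
        FSDataInputStream fin = fs.open(path);
        long length = fs.getFileStatus(path).getLen();
        BCFile.Reader reader = new BCFile.Reader(fin, length, conf);
        for (int i = 0; i < reader.getBlockCount(); i++) {
          BCFile.Reader.BlockReader blk = reader.getDataBlock(i);
          try {
            System.out.println(blk.readUTF());
          } finally {
            blk.close();
          }
        }
        reader.close();
        fin.close();
      }
    }

Only one BlockAppender may be open at a time, and no data block may be created
after the first meta block, which is why each appender is closed before the next
one is opened.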

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedByteArrayOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedByteArrayOutputStream.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedByteArrayOutputStream.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedByteArrayOutputStream.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.io.file.tfile;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A byte-array-backed output stream with a limit. The limit must be no larger
+ * than the buffer capacity. The object can be reused through the
+ * <code>reset</code> API, choosing a different limit for each round.
+ */
+class BoundedByteArrayOutputStream extends OutputStream {
+  private final byte[] buffer;
+  private int limit;
+  private int count;
+
+  public BoundedByteArrayOutputStream(int capacity) {
+    this(capacity, capacity);
+  }
+
+  public BoundedByteArrayOutputStream(int capacity, int limit) {
+    if ((capacity < limit) || (capacity | limit) < 0) {
+      throw new IllegalArgumentException("Invalid capacity/limit");
+    }
+    this.buffer = new byte[capacity];
+    this.limit = limit;
+    this.count = 0;
+  }
+
+  @Override
+  public void write(int b) throws IOException {
+    if (count >= limit) {
+      throw new EOFException("Reaching the limit of the buffer.");
+    }
+    buffer[count++] = (byte) b;
+  }
+
+  @Override
+  public void write(byte b[], int off, int len) throws IOException {
+    if ((off < 0) || (off > b.length) || (len < 0) || ((off + len) > b.length)
+        || ((off + len) < 0)) {
+      throw new IndexOutOfBoundsException();
+    } else if (len == 0) {
+      return;
+    }
+
+    if (count + len > limit) {
+      throw new EOFException("Reach the limit of the buffer");
+    }
+
+    System.arraycopy(b, off, buffer, count, len);
+    count += len;
+  }
+
+  public void reset(int newlim) {
+    if (newlim > buffer.length) {
+      throw new IndexOutOfBoundsException("Limit exceeds buffer size");
+    }
+    this.limit = newlim;
+    this.count = 0;
+  }
+
+  public void reset() {
+    this.limit = buffer.length;
+    this.count = 0;
+  }
+
+  public int getLimit() {
+    return limit;
+  }
+
+  public byte[] getBuffer() {
+    return buffer;
+  }
+
+  public int size() {
+    return count;
+  }
+}
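
To illustrate the reset-with-a-new-limit behavior described in the class javadoc,
a small sketch (not from the patch); it assumes same-package access and an
enclosing method that declares IOException.

    BoundedByteArrayOutputStream bbos = new BoundedByteArrayOutputStream(64, 16);
    bbos.write("0123456789".getBytes());  // 10 bytes, within the 16-byte limit
    int used = bbos.size();               // 10
    bbos.reset(32);                       // reuse the same buffer with a larger limit
    bbos.write(new byte[20]);             // fine now; would have hit EOFException before
    byte[] backing = bbos.getBuffer();    // 64-byte backing array; first size() bytes valid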

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.io.file.tfile;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+
+/**
+ * BoundedRangeFileInputStream abstracts a contiguous region of a Hadoop
+ * FSDataInputStream as a regular input stream. One can create multiple
+ * BoundedRangeFileInputStreams on top of the same FSDataInputStream and they
+ * will not interfere with each other.
+ */
+class BoundedRangeFileInputStream extends InputStream {
+
+  private FSDataInputStream in;
+  private long pos;
+  private long end;
+  private long mark;
+  private final byte[] oneByte = new byte[1];
+
+  /**
+   * Constructor
+   * 
+   * @param in
+   *          The FSDataInputStream we connect to.
+   * @param offset
+   *          Beginning offset of the region.
+   * @param length
+   *          Length of the region.
+   * 
+   *          The actual length of the region may be smaller if (offset +
+   *          length) goes beyond the end of the FS input stream.
+   */
+  public BoundedRangeFileInputStream(FSDataInputStream in, long offset,
+      long length) {
+    if (offset < 0 || length < 0) {
+      throw new IndexOutOfBoundsException("Invalid offset/length: " + offset
+          + "/" + length);
+    }
+
+    this.in = in;
+    this.pos = offset;
+    this.end = offset + length;
+    this.mark = -1;
+  }
+
+  @Override
+  public int available() throws IOException {
+    int avail = in.available();
+    if (pos + avail > end) {
+      avail = (int) (end - pos);
+    }
+
+    return avail;
+  }
+
+  @Override
+  public int read() throws IOException {
+    int ret = read(oneByte);
+    if (ret == 1) return oneByte[0] & 0xff;
+    return -1;
+  }
+
+  @Override
+  public int read(byte[] b) throws IOException {
+    return read(b, 0, b.length);
+  }
+
+  @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+    if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
+      throw new IndexOutOfBoundsException();
+    }
+
+    int n = (int) Math.min(Integer.MAX_VALUE, Math.min(len, (end - pos)));
+    if (n == 0) return -1;
+    int ret = 0;
+    synchronized (in) {
+      in.seek(pos);
+      ret = in.read(b, off, n);
+    }
+    if (ret < 0) {
+      end = pos;
+      return -1;
+    }
+    pos += ret;
+    return ret;
+  }
+
+  @Override
+  /*
+   * We may skip beyond the end of the file.
+   */
+  public long skip(long n) throws IOException {
+    long len = Math.min(n, end - pos);
+    pos += len;
+    return len;
+  }
+
+  @Override
+  public void mark(int readlimit) {
+    mark = pos;
+  }
+
+  @Override
+  public void reset() throws IOException {
+    if (mark < 0) throw new IOException("Resetting to invalid mark");
+    pos = mark;
+  }
+
+  @Override
+  public boolean markSupported() {
+    return true;
+  }
+
+  @Override
+  public void close() {
+    // Invalidate the state of the stream.
+    in = null;
+    pos = end;
+    mark = -1;
+  }
+}
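
To make the "multiple non-interfering views" point concrete, a hypothetical
sketch; fs, path, and the offsets/lengths are made up, and same-package access is
assumed since the class is package-private.

    FSDataInputStream fsin = fs.open(path);
    InputStream blockA = new BoundedRangeFileInputStream(fsin, 0, 1024);
    InputStream blockB = new BoundedRangeFileInputStream(fsin, 1024, 2048);
    byte[] buf = new byte[512];
    int a = blockA.read(buf);  // reads only within [0, 1024)
    int b = blockB.read(buf);  // reads only within [1024, 3072), with its own position
    blockA.close();            // invalidates this view only; fsin remains usable

Each read seeks the shared stream inside a synchronized block, which is what keeps
the views from stepping on each other.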

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/ByteArray.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/ByteArray.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/ByteArray.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/ByteArray.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.io.file.tfile;
+
+import org.apache.hadoop.io.BytesWritable;
+
+/**
+ * Adaptor class to wrap byte-array backed objects (including java byte array)
+ * as RawComparable objects.
+ */
+public final class ByteArray implements RawComparable {
+  private final byte[] buffer;
+  private final int offset;
+  private final int len;
+
+  /**
+   * Constructing a ByteArray from a {@link BytesWritable}.
+   * 
+   * @param other
+   */
+  public ByteArray(BytesWritable other) {
+    this(other.get(), 0, other.getSize());
+  }
+
+  /**
+   * Wrap a whole byte array as a RawComparable.
+   * 
+   * @param buffer
+   *          the byte array buffer.
+   */
+  public ByteArray(byte[] buffer) {
+    this(buffer, 0, buffer.length);
+  }
+
+  /**
+   * Wrap a partial byte array as a RawComparable.
+   * 
+   * @param buffer
+   *          the byte array buffer.
+   * @param offset
+   *          the starting offset
+   * @param len
+   *          the length of the consecutive bytes to be wrapped.
+   */
+  public ByteArray(byte[] buffer, int offset, int len) {
+    if ((offset | len | (buffer.length - offset - len)) < 0) {
+      throw new IndexOutOfBoundsException();
+    }
+    this.buffer = buffer;
+    this.offset = offset;
+    this.len = len;
+  }
+
+  /**
+   * @return the underlying buffer.
+   */
+  @Override
+  public byte[] buffer() {
+    return buffer;
+  }
+
+  /**
+   * @return the offset in the buffer.
+   */
+  @Override
+  public int offset() {
+    return offset;
+  }
+
+  /**
+   * @return the size of the byte array.
+   */
+  @Override
+  public int size() {
+    return len;
+  }
+}
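
A trivial sketch of the adaptor in use (the key bytes are made up):

    byte[] key = "row:0001".getBytes();
    RawComparable whole  = new ByteArray(key);        // wraps all 8 bytes
    RawComparable suffix = new ByteArray(key, 4, 4);  // wraps only "0001"
    int n = suffix.size();                            // 4; buffer()/offset()/size()
                                                      // expose the region without copying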

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Chunk.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Chunk.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Chunk.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Chunk.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,429 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.io.file.tfile;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * Several related classes to support chunk-encoded sub-streams on top of a
+ * regular stream.
+ */
+final class Chunk {
+
+  /**
+   * Prevent the instantiation of class.
+   */
+  private Chunk() {
+    // nothing
+  }
+
+  /**
+   * Decoding a chain of chunks encoded through ChunkEncoder or
+   * SingleChunkEncoder.
+   */
+  static public class ChunkDecoder extends InputStream {
+    private DataInputStream in = null;
+    private boolean lastChunk;
+    private int remain = 0;
+    private boolean closed;
+
+    public ChunkDecoder() {
+      lastChunk = true;
+      closed = true;
+    }
+
+    public void reset(DataInputStream downStream) {
+      // no need to wind forward the old input.
+      in = downStream;
+      lastChunk = false;
+      remain = 0;
+      closed = false;
+    }
+
+    /**
+     * Constructor
+     * 
+     * @param in
+     *          The source input stream which contains chunk-encoded data
+     *          stream.
+     */
+    public ChunkDecoder(DataInputStream in) {
+      this.in = in;
+      lastChunk = false;
+      closed = false;
+    }
+
+    /**
+     * Have we reached the last chunk.
+     * 
+     * @return true if we have reached the last chunk.
+     * @throws java.io.IOException
+     */
+    public boolean isLastChunk() throws IOException {
+      checkEOF();
+      return lastChunk;
+    }
+
+    /**
+     * How many bytes remain in the current chunk?
+     * 
+     * @return remaining bytes left in the current chunk.
+     * @throws java.io.IOException
+     */
+    public int getRemain() throws IOException {
+      checkEOF();
+      return remain;
+    }
+
+    /**
+     * Reading the length of next chunk.
+     * 
+     * @throws java.io.IOException
+     *           when no more data is available.
+     */
+    private void readLength() throws IOException {
+      remain = Utils.readVInt(in);
+      if (remain >= 0) {
+        lastChunk = true;
+      } else {
+        remain = -remain;
+      }
+    }
+
+    /**
+     * Check whether we reach the end of the stream.
+     * 
+     * @return false if the chunk encoded stream has more data to read (in which
+     *         case available() will be greater than 0); true otherwise.
+     * @throws java.io.IOException
+     *           on I/O errors.
+     */
+    private boolean checkEOF() throws IOException {
+      if (isClosed()) return true;
+      while (true) {
+        if (remain > 0) return false;
+        if (lastChunk) return true;
+        readLength();
+      }
+    }
+
+    @Override
+    /*
+     * This method never blocks the caller. Returning 0 does not mean we reach
+     * the end of the stream.
+     */
+    public int available() {
+      return remain;
+    }
+
+    @Override
+    public int read() throws IOException {
+      if (checkEOF()) return -1;
+      int ret = in.read();
+      if (ret < 0) throw new IOException("Corrupted chunk encoding stream");
+      --remain;
+      return ret;
+    }
+
+    @Override
+    public int read(byte[] b) throws IOException {
+      return read(b, 0, b.length);
+    }
+
+    @Override
+    public int read(byte[] b, int off, int len) throws IOException {
+      if ((off | len | (off + len) | (b.length - (off + len))) < 0) {
+        throw new IndexOutOfBoundsException();
+      }
+
+      if (!checkEOF()) {
+        int n = Math.min(remain, len);
+        int ret = in.read(b, off, n);
+        if (ret < 0) throw new IOException("Corrupted chunk encoding stream");
+        remain -= ret;
+        return ret;
+      }
+      return -1;
+    }
+
+    @Override
+    public long skip(long n) throws IOException {
+      if (!checkEOF()) {
+        long ret = in.skip(Math.min(remain, n));
+        remain -= ret;
+        return ret;
+      }
+      return 0;
+    }
+
+    @Override
+    public boolean markSupported() {
+      return false;
+    }
+
+    public boolean isClosed() {
+      return closed;
+    }
+
+    @Override
+    public void close() throws IOException {
+      if (closed == false) {
+        try {
+          while (!checkEOF()) {
+            skip(Integer.MAX_VALUE);
+          }
+        } finally {
+          closed = true;
+        }
+      }
+    }
+  }
+
+  /**
+   * Chunk Encoder. Encoding the output data into a chain of chunks in the
+   * following sequences: -len1, byte[len1], -len2, byte[len2], ... len_n,
+   * byte[len_n]. Where len1, len2, ..., len_n are the lengths of the data
+   * chunks. Non-terminal chunks have their lengths negated. Non-terminal chunks
+   * cannot have length 0. All lengths are in the range of 0 to
+   * Integer.MAX_VALUE and are encoded in Utils.VInt format.
+   */
+  static public class ChunkEncoder extends OutputStream {
+    /**
+     * The data output stream it connects to.
+     */
+    private DataOutputStream out;
+
+    /**
+     * The internal buffer that is only used when we do not know the advertised
+     * size.
+     */
+    private byte buf[];
+
+    /**
+     * The number of valid bytes in the buffer. This value is always in the
+     * range <tt>0</tt> through <tt>buf.length</tt>; elements <tt>buf[0]</tt>
+     * through <tt>buf[count-1]</tt> contain valid byte data.
+     */
+    private int count;
+
+    /**
+     * Constructor.
+     * 
+     * @param out
+     *          the underlying output stream.
+     * @param buf
+     *          user-supplied buffer. The buffer would be used exclusively by
+     *          the ChunkEncoder during its life cycle.
+     */
+    public ChunkEncoder(DataOutputStream out, byte[] buf) {
+      this.out = out;
+      this.buf = buf;
+      this.count = 0;
+    }
+
+    /**
+     * Write out a chunk.
+     * 
+     * @param chunk
+     *          The chunk buffer.
+     * @param offset
+     *          Offset to chunk buffer for the beginning of chunk.
+     * @param len
+     *          Length of the chunk.
+     * @param last
+     *          Is this the last call to flushBuffer?
+     */
+    private void writeChunk(byte[] chunk, int offset, int len, boolean last)
+        throws IOException {
+      if (last) { // always write out the length for the last chunk.
+        Utils.writeVInt(out, len);
+        if (len > 0) {
+          out.write(chunk, offset, len);
+        }
+      } else {
+        if (len > 0) {
+          Utils.writeVInt(out, -len);
+          out.write(chunk, offset, len);
+        }
+      }
+    }
+
+    /**
+     * Write out a chunk that is a concatenation of the internal buffer plus
+     * user supplied data. This will never be the last block.
+     * 
+     * @param data
+     *          User supplied data buffer.
+     * @param offset
+     *          Offset to user data buffer.
+     * @param len
+     *          User data buffer size.
+     */
+    private void writeBufData(byte[] data, int offset, int len)
+        throws IOException {
+      if (count + len > 0) {
+        Utils.writeVInt(out, -(count + len));
+        out.write(buf, 0, count);
+        count = 0;
+        out.write(data, offset, len);
+      }
+    }
+
+    /**
+     * Flush the internal buffer.
+     * 
+     * The buffered bytes are written out as a non-terminal chunk.
+     * 
+     * @throws java.io.IOException
+     */
+    private void flushBuffer() throws IOException {
+      if (count > 0) {
+        writeChunk(buf, 0, count, false);
+        count = 0;
+      }
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      if (count >= buf.length) {
+        flushBuffer();
+      }
+      buf[count++] = (byte) b;
+    }
+
+    @Override
+    public void write(byte b[]) throws IOException {
+      write(b, 0, b.length);
+    }
+
+    @Override
+    public void write(byte b[], int off, int len) throws IOException {
+      if ((len + count) >= buf.length) {
+        /*
+         * If the input data does not fit in the buffer, write the buffered
+         * bytes and the input data out together as a single chunk. In this
+         * way buffered streams will cascade harmlessly.
+         */
+        writeBufData(b, off, len);
+        return;
+      }
+
+      System.arraycopy(b, off, buf, count, len);
+      count += len;
+    }
+
+    @Override
+    public void flush() throws IOException {
+      flushBuffer();
+      out.flush();
+    }
+
+    @Override
+    public void close() throws IOException {
+      if (buf != null) {
+        try {
+          writeChunk(buf, 0, count, true);
+        } finally {
+          buf = null;
+          out = null;
+        }
+      }
+    }
+  }
+
+  /**
+   * Encode the whole stream as a single chunk. The total size of the chunk
+   * must be known up-front.
+   */
+  static public class SingleChunkEncoder extends OutputStream {
+    /**
+     * The data output stream it connects to.
+     */
+    private final DataOutputStream out;
+
+    /**
+     * The remaining bytes to be written.
+     */
+    private int remain;
+    private boolean closed = false;
+
+    /**
+     * Constructor.
+     * 
+     * @param out
+     *          the underlying output stream.
+     * @param size
+     *          The total # of bytes to be written as a single chunk.
+     * @throws java.io.IOException
+     *           if an I/O error occurs.
+     */
+    public SingleChunkEncoder(DataOutputStream out, int size)
+        throws IOException {
+      this.out = out;
+      this.remain = size;
+      Utils.writeVInt(out, size);
+    }
+
+    @Override
+    public void write(int b) throws IOException {
+      if (remain > 0) {
+        out.write(b);
+        --remain;
+      } else {
+        throw new IOException("Writing more bytes than advertised size.");
+      }
+    }
+
+    @Override
+    public void write(byte b[]) throws IOException {
+      write(b, 0, b.length);
+    }
+
+    @Override
+    public void write(byte b[], int off, int len) throws IOException {
+      if (remain >= len) {
+        out.write(b, off, len);
+        remain -= len;
+      } else {
+        throw new IOException("Writing more bytes than advertised size.");
+      }
+    }
+
+    @Override
+    public void flush() throws IOException {
+      out.flush();
+    }
+
+    @Override
+    public void close() throws IOException {
+      if (closed) {
+        return;
+      }
+
+      try {
+        if (remain > 0) {
+          throw new IOException("Writing less bytes than advertised size.");
+        }
+      } finally {
+        closed = true;
+      }
+    }
+  }
+}
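
A minimal usage sketch of the ChunkEncoder added above (illustrative only; it
assumes the enclosing class in Chunk.java is named Chunk and that the caller
lives in the org.apache.hadoop.io.file.tfile package). With a working buffer
smaller than the payload, the payload goes out as a non-terminal chunk with a
negative VInt length, and close() appends the terminal chunk:

    package org.apache.hadoop.io.file.tfile;

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class ChunkEncoderSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);

        // 8-byte working buffer lent to the encoder for its whole life cycle.
        Chunk.ChunkEncoder enc = new Chunk.ChunkEncoder(out, new byte[8]);
        enc.write("hello, chunked world".getBytes("UTF-8"));
        enc.close(); // writes the last chunk with a non-negative VInt length

        out.flush();
        System.out.println("encoded size = " + bytes.size() + " bytes");
      }
    }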

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/CompareUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/CompareUtils.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/CompareUtils.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/CompareUtils.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.io.file.tfile;
+
+import java.util.Comparator;
+
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.WritableComparator;
+
+class CompareUtils {
+  /**
+   * Prevent instantiation of this class.
+   */
+  private CompareUtils() {
+    // nothing
+  }
+
+  /**
+   * A comparator to compare anything that implements {@link RawComparable}
+   * using a customized comparator.
+   */
+  public static final class BytesComparator implements
+      Comparator<RawComparable> {
+    private RawComparator<Object> cmp;
+
+    public BytesComparator(RawComparator<Object> cmp) {
+      this.cmp = cmp;
+    }
+
+    @Override
+    public int compare(RawComparable o1, RawComparable o2) {
+      return compare(o1.buffer(), o1.offset(), o1.size(), o2.buffer(), o2
+          .offset(), o2.size());
+    }
+
+    public int compare(byte[] a, int off1, int len1, byte[] b, int off2,
+        int len2) {
+      return cmp.compare(a, off1, len1, b, off2, len2);
+    }
+  }
+
+  /**
+   * Interface for all objects that have a single integer magnitude.
+   */
+  static interface Scalar {
+    long magnitude();
+  }
+
+  static final class ScalarLong implements Scalar {
+    private long magnitude;
+
+    public ScalarLong(long m) {
+      magnitude = m;
+    }
+
+    public long magnitude() {
+      return magnitude;
+    }
+  }
+
+  public static final class ScalarComparator implements Comparator<Scalar> {
+    @Override
+    public int compare(Scalar o1, Scalar o2) {
+      long diff = o1.magnitude() - o2.magnitude();
+      if (diff < 0) return -1;
+      if (diff > 0) return 1;
+      return 0;
+    }
+  }
+
+  public static final class MemcmpRawComparator implements
+      RawComparator<Object> {
+    @Override
+    public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
+      return WritableComparator.compareBytes(b1, s1, l1, b2, s2, l2);
+    }
+
+    @Override
+    public int compare(Object o1, Object o2) {
+      throw new RuntimeException("Object comparison not supported");
+    }
+  }
+}
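
A short sketch of how the comparators above compose (illustrative only; the
ByteArray(byte[]) wrapper is assumed from the ByteArray.java file added
elsewhere in this commit):

    package org.apache.hadoop.io.file.tfile;

    import java.util.Comparator;

    public class CompareUtilsSketch {
      public static void main(String[] args) {
        // Memcmp-style raw comparison wrapped as a Comparator over RawComparable.
        Comparator<RawComparable> cmp =
            new CompareUtils.BytesComparator(new CompareUtils.MemcmpRawComparator());

        RawComparable a = new ByteArray("apple".getBytes());
        RawComparable b = new ByteArray("banana".getBytes());

        // Negative: "apple" sorts before "banana" under byte-wise comparison.
        System.out.println(cmp.compare(a, b));
      }
    }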

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Compression.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Compression.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Compression.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/Compression.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,361 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.io.file.tfile;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.CodecPool;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DefaultCodec;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * Compression-related utilities.
+ */
+final class Compression {
+  static final Log LOG = LogFactory.getLog(Compression.class);
+
+  /**
+   * Prevent instantiation of this class.
+   */
+  private Compression() {
+    // nothing
+  }
+
+  static class FinishOnFlushCompressionStream extends FilterOutputStream {
+    public FinishOnFlushCompressionStream(CompressionOutputStream cout) {
+      super(cout);
+    }
+
+    @Override
+    public void write(byte b[], int off, int len) throws IOException {
+      out.write(b, off, len);
+    }
+
+    @Override
+    public void flush() throws IOException {
+      CompressionOutputStream cout = (CompressionOutputStream) out;
+      cout.finish();
+      cout.flush();
+      cout.resetState();
+    }
+  }
+
+  /**
+   * Compression algorithms.
+   */
+  static enum Algorithm {
+    LZO(TFile.COMPRESSION_LZO) {
+      private transient boolean checked = false;
+      private static final String defaultClazz =
+          "org.apache.hadoop.io.compress.LzoCodec";
+      private transient CompressionCodec codec = null;
+
+      @Override
+      public synchronized boolean isSupported() {
+        if (!checked) {
+          checked = true;
+          String extClazz =
+              (conf.get(CONF_LZO_CLASS) == null ? System
+                  .getProperty(CONF_LZO_CLASS) : null);
+          String clazz = (extClazz != null) ? extClazz : defaultClazz;
+          try {
+            LOG.info("Trying to load Lzo codec class: " + clazz);
+            codec =
+                (CompressionCodec) ReflectionUtils.newInstance(Class
+                    .forName(clazz), conf);
+          } catch (ClassNotFoundException e) {
+            // that is okay
+          }
+        }
+        return codec != null;
+      }
+
+      @Override
+      CompressionCodec getCodec() throws IOException {
+        if (!isSupported()) {
+          throw new IOException(
+              "LZO codec class not specified. Did you forget to set property "
+                  + CONF_LZO_CLASS + "?");
+        }
+
+        return codec;
+      }
+
+      @Override
+      public synchronized InputStream createDecompressionStream(
+          InputStream downStream, Decompressor decompressor,
+          int downStreamBufferSize) throws IOException {
+        if (!isSupported()) {
+          throw new IOException(
+              "LZO codec class not specified. Did you forget to set property "
+                  + CONF_LZO_CLASS + "?");
+        }
+        InputStream bis1 = null;
+        if (downStreamBufferSize > 0) {
+          bis1 = new BufferedInputStream(downStream, downStreamBufferSize);
+        } else {
+          bis1 = downStream;
+        }
+        conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
+        CompressionInputStream cis =
+            codec.createInputStream(bis1, decompressor);
+        BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
+        return bis2;
+      }
+
+      @Override
+      public synchronized OutputStream createCompressionStream(
+          OutputStream downStream, Compressor compressor,
+          int downStreamBufferSize) throws IOException {
+        if (!isSupported()) {
+          throw new IOException(
+              "LZO codec class not specified. Did you forget to set property "
+                  + CONF_LZO_CLASS + "?");
+        }
+        OutputStream bos1 = null;
+        if (downStreamBufferSize > 0) {
+          bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
+        } else {
+          bos1 = downStream;
+        }
+        conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
+        CompressionOutputStream cos =
+            codec.createOutputStream(bos1, compressor);
+        BufferedOutputStream bos2 =
+            new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
+                DATA_OBUF_SIZE);
+        return bos2;
+      }
+    },
+
+    GZ(TFile.COMPRESSION_GZ) {
+      private transient DefaultCodec codec;
+
+      @Override
+      synchronized CompressionCodec getCodec() {
+        if (codec == null) {
+          codec = new DefaultCodec();
+          codec.setConf(conf);
+        }
+
+        return codec;
+      }
+
+      @Override
+      public synchronized InputStream createDecompressionStream(
+          InputStream downStream, Decompressor decompressor,
+          int downStreamBufferSize) throws IOException {
+        // Set the internal buffer size to read from down stream.
+        if (downStreamBufferSize > 0) {
+          codec.getConf().setInt("io.file.buffer.size", downStreamBufferSize);
+        }
+        CompressionInputStream cis =
+            codec.createInputStream(downStream, decompressor);
+        BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
+        return bis2;
+      }
+
+      @Override
+      public synchronized OutputStream createCompressionStream(
+          OutputStream downStream, Compressor compressor,
+          int downStreamBufferSize) throws IOException {
+        OutputStream bos1 = null;
+        if (downStreamBufferSize > 0) {
+          bos1 = new BufferedOutputStream(downStream, downStreamBufferSize);
+        } else {
+          bos1 = downStream;
+        }
+        codec.getConf().setInt("io.file.buffer.size", 32 * 1024);
+        CompressionOutputStream cos =
+            codec.createOutputStream(bos1, compressor);
+        BufferedOutputStream bos2 =
+            new BufferedOutputStream(new FinishOnFlushCompressionStream(cos),
+                DATA_OBUF_SIZE);
+        return bos2;
+      }
+
+      @Override
+      public boolean isSupported() {
+        return true;
+      }
+    },
+
+    NONE(TFile.COMPRESSION_NONE) {
+      @Override
+      CompressionCodec getCodec() {
+        return null;
+      }
+
+      @Override
+      public synchronized InputStream createDecompressionStream(
+          InputStream downStream, Decompressor decompressor,
+          int downStreamBufferSize) throws IOException {
+        if (downStreamBufferSize > 0) {
+          return new BufferedInputStream(downStream, downStreamBufferSize);
+        }
+        return downStream;
+      }
+
+      @Override
+      public synchronized OutputStream createCompressionStream(
+          OutputStream downStream, Compressor compressor,
+          int downStreamBufferSize) throws IOException {
+        if (downStreamBufferSize > 0) {
+          return new BufferedOutputStream(downStream, downStreamBufferSize);
+        }
+
+        return downStream;
+      }
+
+      @Override
+      public boolean isSupported() {
+        return true;
+      }
+    };
+
+    // We require that all compression related settings are configured
+    // statically in the Configuration object.
+    protected static final Configuration conf = new Configuration();
+    private final String compressName;
+    // data input buffer size to absorb small reads from application.
+    private static final int DATA_IBUF_SIZE = 1 * 1024;
+    // data output buffer size to absorb small writes from application.
+    private static final int DATA_OBUF_SIZE = 4 * 1024;
+    public static final String CONF_LZO_CLASS =
+        "io.compression.codec.lzo.class";
+
+    Algorithm(String name) {
+      this.compressName = name;
+    }
+
+    abstract CompressionCodec getCodec() throws IOException;
+
+    public abstract InputStream createDecompressionStream(
+        InputStream downStream, Decompressor decompressor,
+        int downStreamBufferSize) throws IOException;
+
+    public abstract OutputStream createCompressionStream(
+        OutputStream downStream, Compressor compressor, int downStreamBufferSize)
+        throws IOException;
+
+    public abstract boolean isSupported();
+
+    public Compressor getCompressor() throws IOException {
+      CompressionCodec codec = getCodec();
+      if (codec != null) {
+        Compressor compressor = CodecPool.getCompressor(codec);
+        if (compressor != null) {
+          if (compressor.finished()) {
+            // Somebody returned the compressor to CodecPool but is still
+            // using it.
+            LOG.warn("Compressor obtained from CodecPool already finished()");
+          } else {
+            LOG.debug("Got a compressor: " + compressor.hashCode());
+          }
+          /**
+           * The following statement is necessary to work around bugs in 0.18
+           * where a compressor is referenced after being returned to the pool.
+           */
+          compressor.reset();
+        }
+        return compressor;
+      }
+      return null;
+    }
+
+    public void returnCompressor(Compressor compressor) {
+      if (compressor != null) {
+        LOG.debug("Return a compressor: " + compressor.hashCode());
+        CodecPool.returnCompressor(compressor);
+      }
+    }
+
+    public Decompressor getDecompressor() throws IOException {
+      CompressionCodec codec = getCodec();
+      if (codec != null) {
+        Decompressor decompressor = CodecPool.getDecompressor(codec);
+        if (decompressor != null) {
+          if (decompressor.finished()) {
+            // Somebody returned the decompressor to CodecPool but is still
+            // using it.
+            LOG.warn("Decompressor obtained from CodecPool already finished()");
+          } else {
+            LOG.debug("Got a decompressor: " + decompressor.hashCode());
+          }
+          /**
+           * The following statement is necessary to work around bugs in 0.18
+           * where a decompressor is referenced after being returned to the pool.
+           */
+          decompressor.reset();
+        }
+        return decompressor;
+      }
+
+      return null;
+    }
+
+    public void returnDecompressor(Decompressor decompressor) {
+      if (decompressor != null) {
+        LOG.debug("Returned a decompressor: " + decompressor.hashCode());
+        CodecPool.returnDecompressor(decompressor);
+      }
+    }
+
+    public String getName() {
+      return compressName;
+    }
+  }
+
+  static Algorithm getCompressionAlgorithmByName(String compressName) {
+    Algorithm[] algos = Algorithm.class.getEnumConstants();
+
+    for (Algorithm a : algos) {
+      if (a.getName().equals(compressName)) {
+        return a;
+      }
+    }
+
+    throw new IllegalArgumentException(
+        "Unsupported compression algorithm name: " + compressName);
+  }
+
+  static String[] getSupportedAlgorithms() {
+    Algorithm[] algos = Algorithm.class.getEnumConstants();
+
+    ArrayList<String> ret = new ArrayList<String>();
+    for (Algorithm a : algos) {
+      if (a.isSupported()) {
+        ret.add(a.getName());
+      }
+    }
+    return ret.toArray(new String[ret.size()]);
+  }
+}
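
A sketch of driving the Algorithm enum above directly (illustrative only;
TFile.COMPRESSION_GZ is the "gz" algorithm name referenced by this file, and
the compressor/CodecPool round trip is kept explicit):

    package org.apache.hadoop.io.file.tfile;

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;

    import org.apache.hadoop.io.compress.Compressor;

    public class CompressionSketch {
      public static void main(String[] args) throws IOException {
        Compression.Algorithm algo =
            Compression.getCompressionAlgorithmByName(TFile.COMPRESSION_GZ);

        Compressor compressor = algo.getCompressor();
        ByteArrayOutputStream raw = new ByteArrayOutputStream();
        OutputStream compressed = algo.createCompressionStream(raw, compressor, 0);
        try {
          compressed.write(new byte[16 * 1024]); // highly compressible payload
          compressed.flush(); // FinishOnFlushCompressionStream finishes the block
        } finally {
          algo.returnCompressor(compressor); // always hand it back to CodecPool
        }
        System.out.println("compressed to " + raw.size() + " bytes");
      }
    }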

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockAlreadyExists.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockAlreadyExists.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockAlreadyExists.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockAlreadyExists.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.io.file.tfile;
+
+import java.io.IOException;
+
+/**
+ * Exception - Meta Block with the same name already exists.
+ */
+@SuppressWarnings("serial")
+public class MetaBlockAlreadyExists extends IOException {
+  /**
+   * Constructor
+   * 
+   * @param s
+   *          message.
+   */
+  MetaBlockAlreadyExists(String s) {
+    super(s);
+  }
+}

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockDoesNotExist.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockDoesNotExist.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockDoesNotExist.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/MetaBlockDoesNotExist.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.io.file.tfile;
+
+import java.io.IOException;
+
+/**
+ * Exception - No such Meta Block with the given name.
+ */
+@SuppressWarnings("serial")
+public class MetaBlockDoesNotExist extends IOException {
+  /**
+   * Constructor
+   * 
+   * @param s
+   *          message.
+   */
+  MetaBlockDoesNotExist(String s) {
+    super(s);
+  }
+}
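
A hedged sketch of the intended handling of these meta-block exceptions
(illustrative only; the TFile.Writer type and its prepareMetaBlock(String)
method are assumptions about the writer API added in this commit, not code
taken from this diff; MetaBlockDoesNotExist plays the analogous role on the
reader side):

    package org.apache.hadoop.io.file.tfile;

    import java.io.DataOutputStream;
    import java.io.IOException;

    public class MetaBlockSketch {
      static void writeMetaOnce(TFile.Writer writer) throws IOException {
        try {
          DataOutputStream meta = writer.prepareMetaBlock("my.meta.block");
          meta.writeUTF("some application metadata");
          meta.close();
        } catch (MetaBlockAlreadyExists e) {
          // A meta block of the same name was written earlier; skip the duplicate.
        }
      }
    }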

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/RawComparable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/RawComparable.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/RawComparable.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/RawComparable.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.io.file.tfile;
+
+import java.util.Collections;
+import java.util.Comparator;
+
+import org.apache.hadoop.io.RawComparator;
+
+/**
+ * Interface for objects that can be compared through {@link RawComparator}.
+ * This is useful in places where we need a single object reference to specify a
+ * range of bytes in a byte array, such as {@link Comparable} or
+ * {@link Collections#binarySearch(java.util.List, Object, Comparator)}
+ * 
+ * The actual comparison among RawComparables requires an external
+ * RawComparator, and it is the application's responsibility to ensure that
+ * any two RawComparables being compared are semantically comparable under
+ * the same RawComparator.
+ */
+public interface RawComparable {
+  /**
+   * Get the underlying byte array.
+   * 
+   * @return The underlying byte array.
+   */
+  abstract byte[] buffer();
+
+  /**
+   * Get the offset of the first byte in the byte array.
+   * 
+   * @return The offset of the first byte in the byte array.
+   */
+  abstract int offset();
+
+  /**
+   * Get the size of the byte range in the byte array.
+   * 
+   * @return The size of the byte range in the byte array.
+   */
+  abstract int size();
+}
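
A minimal RawComparable implementation (illustrative only; the KeyRange name
is hypothetical) exposing a sub-range of a caller-owned byte array without
copying it:

    package org.apache.hadoop.io.file.tfile;

    public class KeyRange implements RawComparable {
      private final byte[] buffer;
      private final int offset;
      private final int size;

      public KeyRange(byte[] buffer, int offset, int size) {
        this.buffer = buffer;
        this.offset = offset;
        this.size = size;
      }

      @Override
      public byte[] buffer() { return buffer; }

      @Override
      public int offset() { return offset; }

      @Override
      public int size() { return size; }
    }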

Added: hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java?rev=793163&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java (added)
+++ hadoop/common/branches/branch-0.20/src/core/org/apache/hadoop/io/file/tfile/SimpleBufferedOutputStream.java Sat Jul 11 01:22:14 2009
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.io.file.tfile;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * A simplified BufferedOutputStream with a borrowed buffer that allows users
+ * to see how much data has been buffered.
+ */
+class SimpleBufferedOutputStream extends FilterOutputStream {
+  protected byte buf[]; // the borrowed buffer
+  protected int count = 0; // bytes used in buffer.
+
+  // Constructor
+  public SimpleBufferedOutputStream(OutputStream out, byte[] buf) {
+    super(out);
+    this.buf = buf;
+  }
+
+  private void flushBuffer() throws IOException {
+    if (count > 0) {
+      out.write(buf, 0, count);
+      count = 0;
+    }
+  }
+
+  @Override
+  public void write(int b) throws IOException {
+    if (count >= buf.length) {
+      flushBuffer();
+    }
+    buf[count++] = (byte) b;
+  }
+
+  @Override
+  public void write(byte b[], int off, int len) throws IOException {
+    if (len >= buf.length) {
+      flushBuffer();
+      out.write(b, off, len);
+      return;
+    }
+    if (len > buf.length - count) {
+      flushBuffer();
+    }
+    System.arraycopy(b, off, buf, count, len);
+    count += len;
+  }
+
+  @Override
+  public synchronized void flush() throws IOException {
+    flushBuffer();
+    out.flush();
+  }
+
+  // Get the number of bytes currently held in the internal buffer.
+  public int size() {
+    return count;
+  }
+}
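
A short usage sketch of SimpleBufferedOutputStream (illustrative only),
showing the borrowed buffer and the size() accessor that distinguishes this
class from java.io.BufferedOutputStream:

    package org.apache.hadoop.io.file.tfile;

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;

    public class SimpleBufferedOutputStreamSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        byte[] borrowed = new byte[64];
        SimpleBufferedOutputStream out =
            new SimpleBufferedOutputStream(sink, borrowed);

        out.write("buffered".getBytes());
        System.out.println("buffered bytes: " + out.size()); // 8, still in the buffer
        System.out.println("written bytes:  " + sink.size()); // 0, nothing flushed yet

        out.flush();
        System.out.println("after flush:    " + sink.size()); // 8
      }
    }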