Posted to commits@hbase.apache.org by st...@apache.org on 2009/09/10 01:00:14 UTC
svn commit: r813158 - in /hadoop/hbase/trunk: CHANGES.txt src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
Author: stack
Date: Wed Sep 9 23:00:13 2009
New Revision: 813158
URL: http://svn.apache.org/viewvc?rev=813158&view=rev
Log:
HBASE-1818 HFile code review and refinement -- reversing patch... doesn't pass tests
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=813158&r1=813157&r2=813158&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Wed Sep 9 23:00:13 2009
@@ -34,7 +34,6 @@
HBASE-1800 Too many ZK connections
HBASE-1819 Update to 0.20.1 hadoop and zk 3.2.1
HBASE-1820 Update jruby from 1.2 to 1.3.1
- HBASE-1818 HFile code review and refinement (Shubert Zhang via Stack)
OPTIMIZATIONS
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java?rev=813158&r1=813157&r2=813158&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java Wed Sep 9 23:00:13 2009
@@ -168,7 +168,7 @@
protected String name;
// Total uncompressed bytes, maybe calculate a compression ratio later.
- private long totalBytes = 0;
+ private int totalBytes = 0;
// Total # of key/value entries, ie: how many times add() was called.
private int entryCount = 0;
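Note: the hunk above narrows totalBytes from long back to int as part of the revert. The reversed patch had presumably widened it so that the running total of uncompressed bytes could not wrap once it passed Integer.MAX_VALUE (~2 GiB). A minimal, self-contained illustration of that wraparound (not HFile code; the 64 MiB block size is made up):

    // Accumulating byte counts into an int silently wraps once the
    // running total exceeds Integer.MAX_VALUE (~2 GiB).
    int intTotal = 0;
    long longTotal = 0L;
    int blockSize = 64 * 1024 * 1024;       // hypothetical 64 MiB blocks
    for (int i = 0; i < 40; i++) {          // 40 x 64 MiB = 2.5 GiB
      intTotal += blockSize;
      longTotal += blockSize;
    }
    // intTotal is now negative (-1610612736); longTotal is 2684354560.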
@@ -320,12 +320,13 @@
*/
private void finishBlock() throws IOException {
if (this.out == null) return;
- int size = releaseCompressingStream(this.out);
+ long size = releaseCompressingStream(this.out);
this.out = null;
blockKeys.add(firstKey);
+ int written = longToInt(size);
blockOffsets.add(Long.valueOf(blockBegin));
- blockDataSizes.add(Integer.valueOf(size));
- this.totalBytes += size;
+ blockDataSizes.add(Integer.valueOf(written));
+ this.totalBytes += written;
}
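Note: the restored finishBlock() takes the long returned by releaseCompressingStream() and narrows it through longToInt() before recording the block size. longToInt()'s body is not part of this diff; a defensive version of such a helper might look like the following sketch (an assumption, not the actual HFile implementation):

    // Hypothetical narrowing helper: fail loudly instead of truncating.
    static int longToInt(long l) {
      if (l < Integer.MIN_VALUE || l > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("value out of int range: " + l);
      }
      return (int) l;
    }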
/*
@@ -334,10 +335,10 @@
*/
private void newBlock() throws IOException {
// This is where the next block begins.
- this.blockBegin = outputStream.getPos();
+ blockBegin = outputStream.getPos();
this.out = getCompressingStream();
this.out.write(DATABLOCKMAGIC);
- this.firstKey = null;
+ firstKey = null;
}
/*
@@ -512,7 +513,7 @@
}
if (this.lastKeyBuffer != null) {
if (this.comparator.compare(this.lastKeyBuffer, this.lastKeyOffset,
- this.lastKeyLength, key, offset, length) >= 0) {
+ this.lastKeyLength, key, offset, length) > 0) {
throw new IOException("Added a key not lexically larger than" +
" previous key=" + Bytes.toString(key, offset, length) +
", lastkey=" + Bytes.toString(this.lastKeyBuffer, this.lastKeyOffset,
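Note: the boundary restored above changes what counts as an ordering violation: with compare(last, key) > 0, a key equal to the previous key is accepted, while the reversed patch's >= 0 also rejected exact duplicates. A small illustration (not HFile code):

    import org.apache.hadoop.hbase.util.Bytes;

    byte[] last = Bytes.toBytes("row1");
    byte[] key  = Bytes.toBytes("row1");   // same key appended twice
    int cmp = Bytes.compareTo(last, key);  // 0 for equal byte arrays
    boolean rejectWithGt = cmp > 0;        // false: duplicate accepted
    boolean rejectWithGe = cmp >= 0;       // true: duplicate rejected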
@@ -619,7 +620,7 @@
appendFileInfo(this.fileinfo, FileInfo.AVG_KEY_LEN,
Bytes.toBytes(avgKeyLen), false);
int avgValueLen = this.entryCount == 0? 0:
- (int)(this.valuelength/this.entryCount);
+ (int)(this.keylength/this.entryCount);
appendFileInfo(this.fileinfo, FileInfo.AVG_VALUE_LEN,
Bytes.toBytes(avgValueLen), false);
appendFileInfo(this.fileinfo, FileInfo.COMPARATOR,
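Note: the restored line computes AVG_VALUE_LEN from this.keylength, which appears to be the very inconsistency the reversed patch had addressed by dividing this.valuelength instead. A toy illustration of the difference (made-up numbers, not HFile code):

    long keylength = 1000, valuelength = 5000;  // total key/value bytes
    int entryCount = 100;
    int fromKeys   = (int)(keylength / entryCount);    // 10: what gets stored
    int fromValues = (int)(valuelength / entryCount);  // 50: the actual average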
@@ -859,7 +860,7 @@
if (trailer.metaIndexCount == 0) {
return null; // there are no meta blocks
}
- if ((metaIndex == null) || (metaIndex.count == 0)) {
+ if (metaIndex == null) {
throw new IOException("Meta index not loaded");
}
byte [] mbname = Bytes.toBytes(metaBlockName);
@@ -875,14 +876,16 @@
ByteBuffer buf = decompress(metaIndex.blockOffsets[block],
longToInt(blockSize), metaIndex.blockDataSizes[block]);
- if (buf == null)
- return null;
byte [] magic = new byte[METABLOCKMAGIC.length];
buf.get(magic, 0, magic.length);
if (! Arrays.equals(magic, METABLOCKMAGIC)) {
throw new IOException("Meta magic is bad in block " + block);
}
+ // Toss the header. May have to remove later due to performance.
+ buf.compact();
+ buf.limit(buf.limit() - METABLOCKMAGIC.length);
+ buf.rewind();
return buf;
}
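Note: the three "+" lines above restore the original header-stripping idiom. After buf.get(magic, ...) has advanced the position past the magic, compact() copies the remaining payload down to index 0 and sets the limit to the capacity, so shrinking the limit by METABLOCKMAGIC.length and rewinding leaves a buffer that spans exactly the payload. Because compact() physically copies every remaining byte, the restored comment flags it as a possible performance concern. A self-contained sketch of the idiom (toy magic and payload, not HFile code):

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    byte[] MAGIC = "METABLK".getBytes();
    byte[] payload = "payload-bytes".getBytes();
    ByteBuffer buf = ByteBuffer.allocate(MAGIC.length + payload.length);
    buf.put(MAGIC).put(payload).rewind();

    byte[] magic = new byte[MAGIC.length];
    buf.get(magic);                          // position now past the header
    assert Arrays.equals(magic, MAGIC);

    buf.compact();                           // payload copied to index 0
    buf.limit(buf.limit() - MAGIC.length);   // drop the header's length
    buf.rewind();                            // buffer now spans only payload
    // buf.remaining() == payload.length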
/**
@@ -895,7 +898,7 @@
if (blockIndex == null) {
throw new IOException("Block index not loaded");
}
- if (block < 0 || block >= blockIndex.count) {
+ if (block < 0 || block > blockIndex.count) {
throw new IOException("Requested block is out of range: " + block +
", max: " + blockIndex.count);
}
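Note: the restored bounds test is one the reversed patch had tightened. Valid block indexes run 0 through blockIndex.count - 1, but block > blockIndex.count lets block == count slip through to fail somewhere later instead of here. Illustration (not HFile code):

    int count = 10, block = 10;                             // 10 is out of range
    boolean restoredRejects = block < 0 || block > count;   // false
    boolean patchedRejects  = block < 0 || block >= count;  // true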
@@ -932,15 +935,16 @@
}
ByteBuffer buf = decompress(blockIndex.blockOffsets[block],
longToInt(onDiskBlockSize), this.blockIndex.blockDataSizes[block]);
- if (buf == null) {
- throw new IOException("Decompress block failure " + block);
- }
byte [] magic = new byte[DATABLOCKMAGIC.length];
buf.get(magic, 0, magic.length);
if (!Arrays.equals(magic, DATABLOCKMAGIC)) {
throw new IOException("Data magic is bad in block " + block);
}
+ // Toss the header. May have to remove later due to performance.
+ buf.compact();
+ buf.limit(buf.limit() - DATABLOCKMAGIC.length);
+ buf.rewind();
// Cache the block
if(cacheBlock && cache != null) {
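Note: this hunk restores the same header-stripping idiom sketched above, now on the data-block path, and also drops the patch's explicit null check on decompress(). Assuming decompress() can in fact return null (as the removed guard implied), the failure mode changes roughly as follows (hypothetical sketch, not HFile code):

    ByteBuffer buf = maybeDecompress();      // hypothetical; may return null
    // Patched code guarded here:
    //   if (buf == null) throw new IOException("Decompress block failure " + block);
    // Restored code has no guard, so a null buf would surface as a
    // NullPointerException at the subsequent buf.get(magic, 0, magic.length).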
@@ -1245,10 +1249,8 @@
}
if (block != null && currBlock == 0) {
block.rewind();
- block.position(DATABLOCKMAGIC.length);
currKeyLen = block.getInt();
currValueLen = block.getInt();
- return true;
}
currBlock = 0;
block = reader.readBlock(currBlock, cacheBlocks);
@@ -1271,7 +1273,6 @@
} else {
// we are already in the same block, just rewind to seek again.
block.rewind();
- block.position(DATABLOCKMAGIC.length);
}
}
}
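Note: both scanner hunks above remove the explicit block.position(DATABLOCKMAGIC.length) skip (the first also drops the patch's early return true). That is consistent with the restored readBlock(): since the magic is compacted away at read time, rewind() alone leaves the buffer at the first key length. Toy illustration (stand-in values, not HFile code):

    import java.nio.ByteBuffer;

    ByteBuffer block = ByteBuffer.allocate(8);
    block.putInt(5).putInt(3);          // stand-ins for key/value lengths
    block.rewind();                     // no header left to skip
    int currKeyLen = block.getInt();    // 5
    int currValueLen = block.getInt();  // 3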
@@ -1365,7 +1366,7 @@
}
/*
- * The block index for a HFile.
+ * The block index for a RFile.
* Used reading.
*/
static class BlockIndex implements HeapSize {