Posted to commits@hbase.apache.org by st...@apache.org on 2008/05/16 00:10:50 UTC
svn commit: r656868 [6/10] - in /hadoop/hbase/trunk: ./
src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/
src/java/org/apache/hadoop/hbase/filter/
src/java/org/apache/hadoop/hbase/hql/ src/java/org/apache/hadoop/hbase/io/
src/j...
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStore.java Thu May 15 15:10:47 2008
@@ -52,11 +52,10 @@
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.io.TextSequence;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.StringUtils;
@@ -104,7 +103,8 @@
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
- final Text storeName;
+ final byte [] storeName;
+ private final String storeNameStr;
/*
* Sorted Map of readers keyed by sequence id (Most recent should be last in
@@ -180,8 +180,9 @@
this.ttl *= 1000;
this.memcache = new Memcache(this.ttl);
this.compactionDir = HRegion.getCompactionDir(basedir);
- this.storeName =
- new Text(this.info.getEncodedName() + "/" + this.family.getFamilyName());
+ this.storeName = Bytes.toBytes(this.info.getEncodedName() + "/" +
+ Bytes.toString(this.family.getName()));
+ this.storeNameStr = Bytes.toString(this.storeName);
// By default, we compact if an HStore has more than
// MIN_COMMITS_FOR_COMPACTION map files
@@ -203,12 +204,12 @@
}
Path mapdir = HStoreFile.getMapDir(basedir, info.getEncodedName(),
- family.getFamilyName());
+ family.getName());
if (!fs.exists(mapdir)) {
fs.mkdirs(mapdir);
}
Path infodir = HStoreFile.getInfoDir(basedir, info.getEncodedName(),
- family.getFamilyName());
+ family.getName());
if (!fs.exists(infodir)) {
fs.mkdirs(infodir);
}
@@ -218,7 +219,7 @@
this.bloomFilter = null;
} else {
this.filterDir = HStoreFile.getFilterDir(basedir, info.getEncodedName(),
- family.getFamilyName());
+ family.getName());
if (!fs.exists(filterDir)) {
fs.mkdirs(filterDir);
}
@@ -232,9 +233,9 @@
// loadHStoreFiles also computes the max sequence id internally.
this.maxSeqId = -1L;
this.storefiles.putAll(loadHStoreFiles(infodir, mapdir));
- if (LOG.isDebugEnabled()) {
+ if (LOG.isDebugEnabled() && this.storefiles.size() > 0) {
LOG.debug("Loaded " + this.storefiles.size() + " file(s) in hstore " +
- this.storeName + ", max sequence id " + this.maxSeqId);
+ Bytes.toString(this.storeName) + ", max sequence id " + this.maxSeqId);
}
try {
@@ -244,7 +245,7 @@
// HADOOP-1700; for now keep going but this is probably not what we want
// long term. If we got here there has been data-loss
LOG.warn("Exception processing reconstruction log " + reconstructionLog +
- " opening " + this.storeName +
+ " opening " + Bytes.toString(this.storeName) +
" -- continuing. Probably DATA LOSS!", e);
}
@@ -317,10 +318,10 @@
}
// Check this edit is for me. Also, guard against writing
// METACOLUMN info such as HBASE::CACHEFLUSH entries
- Text column = val.getColumn();
- if (column.equals(HLog.METACOLUMN)
- || !key.getRegionName().equals(info.getRegionName())
- || !HStoreKey.extractFamily(column).equals(family.getFamilyName())) {
+ byte [] column = val.getColumn();
+ if (Bytes.equals(column, HLog.METACOLUMN)
+ || !Bytes.equals(key.getRegionName(), info.getRegionName())
+ || !HStoreKey.matchingFamily(family.getName(), column)) {
continue;
}
HStoreKey k = new HStoreKey(key.getRow(), column, val.getTimestamp());
@@ -386,7 +387,7 @@
reference = readSplitInfo(p, fs);
}
curfile = new HStoreFile(conf, fs, basedir, info.getEncodedName(),
- family.getFamilyName(), fid, reference);
+ family.getName(), fid, reference);
storeSize += curfile.length();
long storeSeqId = -1;
try {
@@ -450,7 +451,7 @@
Filter bloomFilter = null;
if(fs.exists(filterFile)) {
if (LOG.isDebugEnabled()) {
- LOG.debug("loading bloom filter for " + this.storeName);
+ LOG.debug("loading bloom filter for " + this.storeNameStr);
}
BloomFilterDescriptor.BloomFilterType type =
@@ -482,7 +483,7 @@
}
} else {
if (LOG.isDebugEnabled()) {
- LOG.debug("creating bloom filter for " + this.storeName);
+ LOG.debug("creating bloom filter for " + this.storeNameStr);
}
BloomFilterDescriptor.BloomFilterType type =
@@ -517,7 +518,7 @@
*/
private void flushBloomFilter() throws IOException {
if (LOG.isDebugEnabled()) {
- LOG.debug("flushing bloom filter for " + this.storeName);
+ LOG.debug("flushing bloom filter for " + this.storeNameStr);
}
FSDataOutputStream out =
fs.create(new Path(filterDir, BLOOMFILTER_FILE_NAME));
@@ -527,7 +528,7 @@
out.close();
}
if (LOG.isDebugEnabled()) {
- LOG.debug("flushed bloom filter for " + this.storeName);
+ LOG.debug("flushed bloom filter for " + this.storeNameStr);
}
}
@@ -568,7 +569,7 @@
synchronized (this.storefiles) {
result = new ArrayList<HStoreFile>(storefiles.values());
}
- LOG.debug("closed " + this.storeName);
+ LOG.debug("closed " + this.storeNameStr);
return result;
} finally {
this.lock.writeLock().unlock();
@@ -621,7 +622,7 @@
long now = System.currentTimeMillis();
// A. Write the Maps out to the disk
HStoreFile flushedFile = new HStoreFile(conf, fs, basedir,
- info.getEncodedName(), family.getFamilyName(), -1L, null);
+ info.getEncodedName(), family.getName(), -1L, null);
MapFile.Writer out = flushedFile.getWriter(this.fs, this.compression,
this.bloomFilter);
@@ -641,8 +642,7 @@
for (Map.Entry<HStoreKey, byte []> es: cache.entrySet()) {
HStoreKey curkey = es.getKey();
byte[] bytes = es.getValue();
- TextSequence f = HStoreKey.extractFamily(curkey.getColumn());
- if (f.equals(this.family.getFamilyName())) {
+ if (HStoreKey.matchingFamily(this.family.getName(), curkey.getColumn())) {
if (ttl == HConstants.FOREVER ||
now < curkey.getTimestamp() + ttl) {
entries++;
@@ -775,11 +775,14 @@
* @return mid key if a split is needed, null otherwise
* @throws IOException
*/
- Text compact(final boolean force) throws IOException {
+ byte [] compact(final boolean force) throws IOException {
synchronized (compactLock) {
long maxId = -1;
List<HStoreFile> filesToCompact = null;
synchronized (storefiles) {
+ if (this.storefiles.size() <= 0) {
+ return null;
+ }
filesToCompact = new ArrayList<HStoreFile>(this.storefiles.values());
if (!force && !hasReferences(filesToCompact) &&
filesToCompact.size() < compactionThreshold) {
@@ -800,7 +803,7 @@
// Step through them, writing to the brand-new MapFile
HStoreFile compactedOutputFile = new HStoreFile(conf, fs,
- this.compactionDir, info.getEncodedName(), family.getFamilyName(),
+ this.compactionDir, info.getEncodedName(), family.getName(),
-1L, null);
if (LOG.isDebugEnabled()) {
LOG.debug("started compaction of " + filesToCompact.size() +
@@ -822,7 +825,7 @@
completeCompaction(filesToCompact, compactedOutputFile);
if (LOG.isDebugEnabled()) {
- LOG.debug("Completed compaction of " + this.storeName +
+ LOG.debug("Completed compaction of " + this.storeNameStr +
" store size is " + StringUtils.humanReadableInt(storeSize));
}
}
@@ -883,10 +886,10 @@
long now = System.currentTimeMillis();
int timesSeen = 0;
- Text lastRow = new Text();
- Text lastColumn = new Text();
+ byte [] lastRow = null;
+ byte [] lastColumn = null;
// Map of a row deletes keyed by column with a list of timestamps for value
- Map<Text, List<Long>> deletes = null;
+ Map<byte [], List<Long>> deletes = null;
while (numDone < done.length) {
// Find the reader with the smallest key. If two files have same key
// but different values -- i.e. one is delete and other is non-delete
@@ -909,13 +912,13 @@
// Reflect the current key/val in the output
HStoreKey sk = keys[smallestKey];
- if(lastRow.equals(sk.getRow())
- && lastColumn.equals(sk.getColumn())) {
+ if (Bytes.equals(lastRow, sk.getRow())
+ && Bytes.equals(lastColumn, sk.getColumn())) {
timesSeen++;
} else {
- timesSeen = 1;
+ timesSeen = 0;
// We are on to a new row. Create a new deletes list.
- deletes = new HashMap<Text, List<Long>>();
+ deletes = new TreeMap<byte [], List<Long>>(Bytes.BYTES_COMPARATOR);
}
byte [] value = (vals[smallestKey] == null)?
@@ -924,7 +927,7 @@
timesSeen <= family.getMaxVersions()) {
// Keep old versions until we have maxVersions worth.
// Then just skip them.
- if (sk.getRow().getLength() != 0 && sk.getColumn().getLength() != 0) {
+ if (sk.getRow().length != 0 && sk.getColumn().length != 0) {
// Only write out objects which have a non-zero length key and
// value
if (ttl == HConstants.FOREVER || now < sk.getTimestamp() + ttl) {
@@ -938,8 +941,8 @@
}
// Update last-seen items
- lastRow.set(sk.getRow());
- lastColumn.set(sk.getColumn());
+ lastRow = sk.getRow();
+ lastColumn = sk.getColumn();
// Advance the smallest key. If that reader's all finished, then
// mark it as done.
@@ -962,7 +965,7 @@
try {
rdrs[i].close();
} catch (IOException e) {
- LOG.warn("Exception closing reader for " + this.storeName, e);
+ LOG.warn("Exception closing reader for " + this.storeNameStr, e);
}
}
}
@@ -984,7 +987,7 @@
* passed value is HGlobals.deleteBytes.
*/
private boolean isDeleted(final HStoreKey hsk, final byte [] value,
- final boolean checkMemcache, final Map<Text, List<Long>> deletes) {
+ final boolean checkMemcache, final Map<byte [], List<Long>> deletes) {
if (checkMemcache && memcache.isDeleted(hsk)) {
return true;
}
@@ -1042,7 +1045,7 @@
try {
// 1. Moving the new MapFile into place.
HStoreFile finalCompactedFile = new HStoreFile(conf, fs, basedir,
- info.getEncodedName(), family.getFamilyName(), -1, null);
+ info.getEncodedName(), family.getName(), -1, null);
if (LOG.isDebugEnabled()) {
LOG.debug("moving " + FSUtils.getPath(compactedFile.getMapFilePath()) +
" to " + FSUtils.getPath(finalCompactedFile.getMapFilePath()));
@@ -1093,7 +1096,8 @@
}
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
- LOG.error("Failed replacing compacted files for " + this.storeName +
+ LOG.error("Failed replacing compacted files for " +
+ this.storeNameStr +
". Compacted file is " + finalCompactedFile.toString() +
". Files replaced are " + compactedFiles.toString() +
" some of which may have been already removed", e);
@@ -1120,10 +1124,12 @@
*
* The returned object should map column names to Cells.
*/
- void getFull(HStoreKey key, final Set<Text> columns, Map<Text, Cell> results)
+ void getFull(HStoreKey key, final Set<byte []> columns,
+ Map<byte [], Cell> results)
throws IOException {
- Map<Text, Long> deletes = new HashMap<Text, Long>();
-
+ Map<byte [], Long> deletes =
+ new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
+
// if the key is null, we're not even looking for anything. return.
if (key == null) {
return;
@@ -1152,7 +1158,7 @@
}
private void getFullFromMapFile(MapFile.Reader map, HStoreKey key,
- Set<Text> columns, Map<Text, Long> deletes, Map<Text, Cell> results)
+ Set<byte []> columns, Map<byte [], Long> deletes, Map<byte [], Cell> results)
throws IOException {
synchronized(map) {
long now = System.currentTimeMillis();
@@ -1167,7 +1173,7 @@
return;
}
do {
- Text readcol = readkey.getColumn();
+ byte [] readcol = readkey.getColumn();
// if we're looking for this column (or all of them), and there isn't
// already a value for this column in the results map, and the key we
@@ -1182,18 +1188,18 @@
// recent delete timestamp, record it for later
if (!deletes.containsKey(readcol)
|| deletes.get(readcol).longValue() < readkey.getTimestamp()) {
- deletes.put(new Text(readcol), readkey.getTimestamp());
+ deletes.put(readcol, readkey.getTimestamp());
}
} else if (!(deletes.containsKey(readcol)
&& deletes.get(readcol).longValue() >= readkey.getTimestamp()) ) {
// So the cell itself isn't a delete, but there may be a delete
// pending from earlier in our search. Only record this result if
// there aren't any pending deletes.
- if (!(deletes.containsKey(readcol)
- && deletes.get(readcol).longValue() >= readkey.getTimestamp())) {
+ if (!(deletes.containsKey(readcol) &&
+ deletes.get(readcol).longValue() >= readkey.getTimestamp())) {
if (ttl == HConstants.FOREVER ||
now < readkey.getTimestamp() + ttl) {
- results.put(new Text(readcol),
+ results.put(readcol,
new Cell(readval.get(), readkey.getTimestamp()));
// need to reinstantiate the readval so we can reuse it,
// otherwise next iteration will destroy our result
@@ -1205,7 +1211,7 @@
}
}
}
- } else if(key.getRow().compareTo(readkey.getRow()) < 0) {
+ } else if (Bytes.compareTo(key.getRow(), readkey.getRow()) < 0) {
// if we've crossed into the next row, then we can just stop
// iterating
break;
@@ -1253,7 +1259,8 @@
// This List of deletes should not be large since we are only keeping rows
// and columns that match those set on the scanner and which have delete
// values. If memory usage becomes an issue, could redo as bloom filter.
- Map<Text, List<Long>> deletes = new HashMap<Text, List<Long>>();
+ Map<byte [], List<Long>> deletes =
+ new TreeMap<byte [], List<Long>>(Bytes.BYTES_COMPARATOR);
// This code below is very close to the body of the getKeys method.
MapFile.Reader[] maparray = getReaders();
for(int i = maparray.length - 1; i >= 0; i--) {
@@ -1337,8 +1344,7 @@
* @throws IOException
*/
List<HStoreKey> getKeys(final HStoreKey origin, final int versions)
- throws IOException {
-
+ throws IOException {
List<HStoreKey> keys = this.memcache.getKeys(origin, versions);
if (versions != ALL_VERSIONS && keys.size() >= versions) {
return keys;
@@ -1414,13 +1420,13 @@
* with strictly increasing timestamps. This method assumes this pattern of
* writes in order to make it reasonably performant.
*/
- Text getRowKeyAtOrBefore(final Text row)
+ byte [] getRowKeyAtOrBefore(final byte [] row)
throws IOException{
// Map of HStoreKeys that are candidates for holding the row key that
// most closely matches what we're looking for. We'll have to update it
// with deletes found all over the place as we go along before finally reading
// the best key out of it at the end.
- SortedMap<HStoreKey, Long> candidateKeys = new TreeMap<HStoreKey, Long>();
+ SortedMap<HStoreKey, Long> candidateKeys = new TreeMap<HStoreKey, Long>();
// Obtain read lock
this.lock.readLock().lock();
@@ -1446,14 +1452,13 @@
* Check an individual MapFile for the row at or before a given key
* and timestamp
*/
- private void rowAtOrBeforeFromMapFile(MapFile.Reader map, Text row,
+ private void rowAtOrBeforeFromMapFile(MapFile.Reader map, final byte [] row,
SortedMap<HStoreKey, Long> candidateKeys)
throws IOException {
HStoreKey searchKey = null;
ImmutableBytesWritable readval = new ImmutableBytesWritable();
HStoreKey readkey = new HStoreKey();
HStoreKey strippedKey = null;
-
synchronized(map) {
// don't bother with the rest of this if the file is empty
map.reset();
@@ -1462,7 +1467,6 @@
}
long now = System.currentTimeMillis();
-
// if there aren't any candidate keys yet, we'll do some things slightly
// different
if (candidateKeys.isEmpty()) {
@@ -1472,7 +1476,7 @@
// save time and add the last key to the candidates.
HStoreKey finalKey = new HStoreKey();
map.finalKey(finalKey);
- if (finalKey.getRow().compareTo(row) < 0) {
+ if (Bytes.compareTo(finalKey.getRow(), row) < 0) {
candidateKeys.put(stripTimestamp(finalKey),
new Long(finalKey.getTimestamp()));
return;
@@ -1489,7 +1493,7 @@
do {
// if we have an exact match on row, and it's not a delete, save this
// as a candidate key
- if (readkey.getRow().equals(row)) {
+ if (Bytes.equals(readkey.getRow(), row)) {
if (!HLogEdit.isDeleted(readval.get())) {
if (ttl == HConstants.FOREVER ||
now < readkey.getTimestamp() + ttl) {
@@ -1502,7 +1506,7 @@
}
}
}
- } else if (readkey.getRow().compareTo(row) > 0 ) {
+ } else if (Bytes.compareTo(readkey.getRow(), row) > 0 ) {
// if the row key we just read is beyond the key we're searching for,
// then we're done. return.
return;
@@ -1538,7 +1542,7 @@
// save time and add the last key to the candidates.
HStoreKey finalKey = new HStoreKey();
map.finalKey(finalKey);
- if (finalKey.getRow().compareTo(searchKey.getRow()) < 0) {
+ if (Bytes.compareTo(finalKey.getRow(), searchKey.getRow()) < 0) {
strippedKey = stripTimestamp(finalKey);
// if the candidate keys map has a cell like this one already,
@@ -1568,7 +1572,7 @@
do {
// if we have an exact match on row, and it's not a delete, save this
// as a candidate key
- if (readkey.getRow().equals(row)) {
+ if (Bytes.equals(readkey.getRow(), row)) {
strippedKey = stripTimestamp(readkey);
if (!HLogEdit.isDeleted(readval.get())) {
if (ttl == HConstants.FOREVER ||
@@ -1593,7 +1597,7 @@
}
}
}
- } else if (readkey.getRow().compareTo(row) > 0 ) {
+ } else if (Bytes.compareTo(readkey.getRow(), row) > 0 ) {
// if the row key we just read is beyond the key we're searching for,
// then we're done. return.
return;
@@ -1644,9 +1648,9 @@
*/
private boolean cellMatches(HStoreKey origin, HStoreKey target){
// if the origin's column is empty, then we're matching any column
- if (origin.getColumn().equals(new Text())){
+ if (Bytes.equals(origin.getColumn(), HConstants.EMPTY_BYTE_ARRAY)){
// if the row matches, then...
- if (target.getRow().equals(origin.getRow())) {
+ if (Bytes.equals(target.getRow(), origin.getRow())) {
// check the timestamp
return target.getTimestamp() <= origin.getTimestamp();
}
@@ -1665,9 +1669,9 @@
*/
private boolean rowMatches(HStoreKey origin, HStoreKey target){
// if the origin's column is empty, then we're matching any column
- if (origin.getColumn().equals(new Text())){
+ if (Bytes.equals(origin.getColumn(), HConstants.EMPTY_BYTE_ARRAY)) {
// if the row matches, then...
- return target.getRow().equals(origin.getRow());
+ return Bytes.equals(target.getRow(), origin.getRow());
}
// otherwise, we want to match on row and column
return target.matchesRowCol(origin);
@@ -1678,7 +1682,7 @@
*
* @return midKey if store can be split, null otherwise
*/
- Text checkSplit() {
+ byte [] checkSplit() {
if (this.storefiles.size() <= 0) {
return null;
}
@@ -1726,14 +1730,14 @@
if (mk != null) {
// if the midkey is the same as the first and last keys, then we cannot
// (ever) split this region.
- if (mk.getRow().equals(firstKey.getRow()) &&
- mk.getRow().equals(lastKey.getRow())) {
+ if (Bytes.equals(mk.getRow(), firstKey.getRow()) &&
+ Bytes.equals(mk.getRow(), lastKey.getRow())) {
return null;
}
return mk.getRow();
}
} catch(IOException e) {
- LOG.warn("Failed getting store size for " + this.storeName, e);
+ LOG.warn("Failed getting store size for " + this.storeNameStr, e);
} finally {
this.lock.readLock().unlock();
}
@@ -1752,8 +1756,8 @@
/**
* Return a scanner for both the memcache and the HStore files
*/
- InternalScanner getScanner(long timestamp, Text targetCols[],
- Text firstRow, RowFilterInterface filter)
+ InternalScanner getScanner(long timestamp, byte [][] targetCols,
+ byte [] firstRow, RowFilterInterface filter)
throws IOException {
lock.readLock().lock();
try {
@@ -1766,7 +1770,7 @@
/** {@inheritDoc} */
@Override
public String toString() {
- return this.storeName.toString();
+ return this.storeNameStr;
}
/*
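
The recurring HStore.java change above -- HashMap<Text, ...> becoming
TreeMap<byte [], ...>(Bytes.BYTES_COMPARATOR) -- is forced by Java array
semantics: arrays inherit identity-based hashCode()/equals() from Object, so
byte [] keys are unusable in a HashMap. A minimal standalone sketch of the
difference, with a hand-rolled comparator standing in for
Bytes.BYTES_COMPARATOR (the HBase class itself is not reproduced here):

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class ByteArrayKeyDemo {

  // Lexicographic, unsigned byte-by-byte ordering -- the contract
  // Bytes.BYTES_COMPARATOR supplies in the patch above (hand-rolled
  // here so the sketch has no HBase dependency).
  static final Comparator<byte[]> BYTES_COMPARATOR = new Comparator<byte[]>() {
    public int compare(byte[] a, byte[] b) {
      int n = Math.min(a.length, b.length);
      for (int i = 0; i < n; i++) {
        int d = (a[i] & 0xff) - (b[i] & 0xff);
        if (d != 0) {
          return d;
        }
      }
      return a.length - b.length;
    }
  };

  public static void main(String[] args) {
    byte[] k1 = "info:regioninfo".getBytes();
    byte[] k2 = "info:regioninfo".getBytes(); // equal content, distinct object

    // Arrays use identity hashCode()/equals(), so a HashMap keyed on
    // byte[] misses lookups even for content-equal keys.
    Map<byte[], String> hash = new HashMap<byte[], String>();
    hash.put(k1, "v");
    System.out.println(hash.get(k2)); // null

    // A TreeMap consults only the supplied comparator, so content-equal
    // arrays resolve to the same entry -- hence the TreeMap swaps above.
    Map<byte[], String> tree = new TreeMap<byte[], String>(BYTES_COMPARATOR);
    tree.put(k1, "v");
    System.out.println(tree.get(k2)); // v
  }
}

The same comparator-backed TreeMap pattern recurs in the Memcache, scanner,
and REST handler changes further down in this commit.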
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java Thu May 15 15:10:47 2008
@@ -39,10 +39,10 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.BlockFSInputStream;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.onelab.filter.Filter;
@@ -50,6 +50,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HStoreKey;
/**
@@ -113,8 +114,8 @@
private final static Random rand = new Random();
private final Path basedir;
- private final String encodedRegionName;
- private final Text colFamily;
+ private final int encodedRegionName;
+ private final byte [] colFamily;
private final long fileId;
private final HBaseConfiguration conf;
private final FileSystem fs;
@@ -131,13 +132,13 @@
* @throws IOException
*/
HStoreFile(HBaseConfiguration conf, FileSystem fs, Path basedir,
- String encodedRegionName, Text colFamily, long fileId,
+ int encodedRegionName, byte [] colFamily, long fileId,
final Reference ref) throws IOException {
this.conf = conf;
this.fs = fs;
this.basedir = basedir;
this.encodedRegionName = encodedRegionName;
- this.colFamily = new Text(colFamily);
+ this.colFamily = colFamily;
long id = fileId;
if (id == -1) {
@@ -145,7 +146,7 @@
Path testpath = null;
do {
id = Math.abs(rand.nextLong());
- testpath = new Path(mapdir, createHStoreFilename(id, null));
+ testpath = new Path(mapdir, createHStoreFilename(id, -1));
} while(fs.exists(testpath));
}
this.fileId = id;
@@ -165,12 +166,12 @@
return reference;
}
- String getEncodedRegionName() {
- return encodedRegionName;
+ int getEncodedRegionName() {
+ return this.encodedRegionName;
}
/** @return the column family */
- Text getColFamily() {
+ byte [] getColFamily() {
return colFamily;
}
@@ -187,18 +188,22 @@
return getMapFilePath(encodedRegionName, fileId,
reference.getEncodedRegionName());
}
- return getMapFilePath(encodedRegionName, fileId, null);
+ return getMapFilePath(this.encodedRegionName, fileId);
}
private Path getMapFilePath(final Reference r) {
if (r == null) {
return getMapFilePath();
}
- return getMapFilePath(r.getEncodedRegionName(), r.getFileId(), null);
+ return getMapFilePath(r.getEncodedRegionName(), r.getFileId());
}
- private Path getMapFilePath(final String encodedName, final long fid,
- final String ern) {
+ private Path getMapFilePath(final int encodedName, final long fid) {
+ return getMapFilePath(encodedName, fid, HRegionInfo.NO_HASH);
+ }
+
+ private Path getMapFilePath(final int encodedName, final long fid,
+ final int ern) {
return new Path(HStoreFile.getMapDir(basedir, encodedName, colFamily),
createHStoreFilename(fid, ern));
}
@@ -210,11 +215,15 @@
reference.getEncodedRegionName());
}
- return getInfoFilePath(encodedRegionName, fileId, null);
+ return getInfoFilePath(encodedRegionName, fileId);
+ }
+
+ private Path getInfoFilePath(final int encodedName, final long fid) {
+ return getInfoFilePath(encodedName, fid, HRegionInfo.NO_HASH);
}
- private Path getInfoFilePath(final String encodedName, final long fid,
- final String ern) {
+ private Path getInfoFilePath(final int encodedName, final long fid,
+ final int ern) {
return new Path(HStoreFile.getInfoDir(basedir, encodedName, colFamily),
createHStoreFilename(fid, ern));
}
@@ -293,8 +302,7 @@
long loadInfo(FileSystem fs) throws IOException {
Path p = null;
if (isReference()) {
- p = getInfoFilePath(reference.getEncodedRegionName(),
- reference.getFileId(), null);
+ p = getInfoFilePath(reference.getEncodedRegionName(), reference.getFileId());
} else {
p = getInfoFilePath();
}
@@ -400,7 +408,6 @@
public synchronized MapFile.Reader getReader(final FileSystem fs,
final Filter bloomFilter, final boolean blockCacheEnabled)
throws IOException {
-
if (isReference()) {
return new HStoreFile.HalfMapFileReader(fs,
getMapFilePath(reference).toString(), conf,
@@ -475,55 +482,72 @@
return r.equals(Range.top);
}
+ private static String createHStoreFilename(final long fid) {
+ return createHStoreFilename(fid, HRegionInfo.NO_HASH);
+ }
+
private static String createHStoreFilename(final long fid,
- final String encodedRegionName) {
- return Long.toString(fid) +
- ((encodedRegionName != null) ? "." + encodedRegionName : "");
+ final int encodedRegionName) {
+ return Long.toString(fid) +
+ ((encodedRegionName != HRegionInfo.NO_HASH)?
+ "." + encodedRegionName : "");
}
-
+
/**
- * @param dir
- * @param encodedRegionName
- * @param colFamily
+ * @param dir Base directory
+ * @param encodedRegionName Encoding of region name.
+ * @param f Column family.
* @return path for map file directory
*/
- public static Path getMapDir(Path dir, String encodedRegionName, Text colFamily) {
- return new Path(dir, new Path(encodedRegionName,
- new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
+ public static Path getMapDir(Path dir, int encodedRegionName,
+ final byte [] f) {
+ return getFamilySubDir(dir, encodedRegionName, f, HSTORE_DATFILE_DIR);
}
/**
- * @param dir
- * @param encodedRegionName
- * @param colFamily
+ * @param dir Base directory
+ * @param encodedRegionName Encoding of region name.
+ * @param f Column family.
* @return the info directory path
*/
- public static Path getInfoDir(Path dir, String encodedRegionName, Text colFamily) {
- return new Path(dir, new Path(encodedRegionName,
- new Path(colFamily.toString(), HSTORE_INFO_DIR)));
+ public static Path getInfoDir(Path dir, int encodedRegionName, byte [] f) {
+ return getFamilySubDir(dir, encodedRegionName, f, HSTORE_INFO_DIR);
}
/**
- * @param dir
- * @param encodedRegionName
- * @param colFamily
+ * @param dir Base directory
+ * @param encodedRegionName Encoding of region name.
+ * @param f Column family.
* @return the bloom filter directory path
*/
- public static Path getFilterDir(Path dir, String encodedRegionName, Text colFamily) {
- return new Path(dir, new Path(encodedRegionName,
- new Path(colFamily.toString(), HSTORE_FILTER_DIR)));
+ public static Path getFilterDir(Path dir, int encodedRegionName,
+ final byte [] f) {
+ return getFamilySubDir(dir, encodedRegionName, f, HSTORE_FILTER_DIR);
+ }
+
+ /*
+ * @param base Base directory
+ * @param encodedRegionName Encoding of region name.
+ * @param f Column family.
+ * @param subdir Subdirectory to create under column family/store directory.
+ * @return
+ */
+ private static Path getFamilySubDir(final Path base,
+ final int encodedRegionName, final byte [] f, final String subdir) {
+ return new Path(base, new Path(Integer.toString(encodedRegionName),
+ new Path(Bytes.toString(f), subdir)));
}
/*
* Data structure to hold reference to a store file over in another region.
*/
static class Reference implements Writable {
- private String encodedRegionName;
+ private int encodedRegionName;
private long fileid;
private Range region;
private HStoreKey midkey;
- Reference(final String ern, final long fid, final HStoreKey m,
+ Reference(final int ern, final long fid, final HStoreKey m,
final Range fr) {
this.encodedRegionName = ern;
this.fileid = fid;
@@ -532,7 +556,7 @@
}
Reference() {
- this(null, -1, null, Range.bottom);
+ this(-1, -1, null, Range.bottom);
}
long getFileId() {
@@ -547,8 +571,8 @@
return midkey;
}
- String getEncodedRegionName() {
- return encodedRegionName;
+ int getEncodedRegionName() {
+ return this.encodedRegionName;
}
/** {@inheritDoc} */
@@ -561,7 +585,7 @@
/** {@inheritDoc} */
public void write(DataOutput out) throws IOException {
- out.writeUTF(encodedRegionName);
+ out.writeInt(this.encodedRegionName);
out.writeLong(fileid);
// Write true if we're doing top of the file.
out.writeBoolean(isTopFileRegion(region));
@@ -570,7 +594,7 @@
/** {@inheritDoc} */
public void readFields(DataInput in) throws IOException {
- encodedRegionName = in.readUTF();
+ this.encodedRegionName = in.readInt();
fileid = in.readLong();
boolean tmp = in.readBoolean();
// If true, set region to top.
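
The Reference rework above also changes the serialized format: the encoded
region name goes from a writeUTF string to a fixed-width writeInt, with -1
(HRegionInfo.NO_HASH) as the no-region sentinel. A hedged round-trip sketch of
just that framing, using plain DataOutput/DataInput (field names mirror the
diff; the midkey and Range handling are simplified to a boolean):

import java.io.*;

public class ReferenceWireDemo {

  static class Reference {
    int encodedRegionName; // was a String pre-patch
    long fileid;
    boolean top;           // stands in for the Range top/bottom flag

    void write(DataOutput out) throws IOException {
      out.writeInt(encodedRegionName); // was out.writeUTF(...)
      out.writeLong(fileid);
      out.writeBoolean(top);
    }

    void readFields(DataInput in) throws IOException {
      encodedRegionName = in.readInt(); // was in.readUTF()
      fileid = in.readLong();
      top = in.readBoolean();
    }
  }

  public static void main(String[] args) throws IOException {
    Reference r = new Reference();
    r.encodedRegionName = 1028785192;
    r.fileid = 42L;
    r.top = true;

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    r.write(new DataOutputStream(bos));

    Reference copy = new Reference();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray())));
    System.out.println(copy.encodedRegionName + "/" + copy.fileid);
  }
}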
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HStoreScanner.java Thu May 15 15:10:47 2008
@@ -21,19 +21,19 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
-import java.util.TreeMap;
-import java.util.SortedMap;
+import java.util.ArrayList;
import java.util.Iterator;
-import java.util.Map;
import java.util.List;
-import java.util.ArrayList;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
-import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
/**
* Scanner scans both the memcache and the HStore
@@ -42,7 +42,7 @@
static final Log LOG = LogFactory.getLog(HStoreScanner.class);
private InternalScanner[] scanners;
- private TreeMap<Text, byte []>[] resultSets;
+ private TreeMap<byte [], byte []>[] resultSets;
private HStoreKey[] keys;
private boolean wildcardMatch = false;
private boolean multipleMatchers = false;
@@ -51,8 +51,8 @@
/** Create a Scanner with a handle on the memcache and HStore files. */
@SuppressWarnings("unchecked")
- HStoreScanner(HStore store, Text[] targetCols, Text firstRow, long timestamp,
- RowFilterInterface filter)
+ HStoreScanner(HStore store, byte [][] targetCols, byte [] firstRow,
+ long timestamp, RowFilterInterface filter)
throws IOException {
this.store = store;
this.dataFilter = filter;
@@ -87,7 +87,7 @@
// All results will match the required column-set and scanTime.
for (int i = 0; i < scanners.length; i++) {
keys[i] = new HStoreKey();
- resultSets[i] = new TreeMap<Text, byte []>();
+ resultSets[i] = new TreeMap<byte [], byte []>(Bytes.BYTES_COMPARATOR);
if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
closeScanner(i);
}
@@ -105,7 +105,7 @@
}
/** {@inheritDoc} */
- public boolean next(HStoreKey key, SortedMap<Text, byte[]> results)
+ public boolean next(HStoreKey key, SortedMap<byte [], byte[]> results)
throws IOException {
// Filtered flag is set by filters. If a cell has been 'filtered out'
@@ -114,15 +114,15 @@
boolean moreToFollow = true;
while (filtered && moreToFollow) {
// Find the lowest-possible key.
- Text chosenRow = null;
+ byte [] chosenRow = null;
long chosenTimestamp = -1;
for (int i = 0; i < this.keys.length; i++) {
if (scanners[i] != null &&
(chosenRow == null ||
- (keys[i].getRow().compareTo(chosenRow) < 0) ||
- ((keys[i].getRow().compareTo(chosenRow) == 0) &&
+ (Bytes.compareTo(keys[i].getRow(), chosenRow) < 0) ||
+ ((Bytes.compareTo(keys[i].getRow(), chosenRow) == 0) &&
(keys[i].getTimestamp() > chosenTimestamp)))) {
- chosenRow = new Text(keys[i].getRow());
+ chosenRow = keys[i].getRow();
chosenTimestamp = keys[i].getTimestamp();
}
}
@@ -136,7 +136,7 @@
// Here we are setting the passed in key with current row+timestamp
key.setRow(chosenRow);
key.setVersion(chosenTimestamp);
- key.setColumn(HConstants.EMPTY_TEXT);
+ key.setColumn(HConstants.EMPTY_BYTE_ARRAY);
// Keep list of deleted cell keys within this row. We need this
// because as we go through scanners, the delete record may be in an
// early scanner and then the same record with a non-delete, non-null
@@ -150,7 +150,7 @@
while ((scanners[i] != null
&& !filtered
&& moreToFollow)
- && (keys[i].getRow().compareTo(chosenRow) == 0)) {
+ && (Bytes.compareTo(keys[i].getRow(), chosenRow) == 0)) {
// If we are doing a wild card match or there are multiple
// matchers per column, we need to scan all the older versions of
// this row to pick up the rest of the family members
@@ -164,9 +164,9 @@
// but this had the effect of overwriting newer
// values with older ones. So now we only insert
// a result if the map does not contain the key.
- HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_TEXT,
+ HStoreKey hsk = new HStoreKey(key.getRow(), HConstants.EMPTY_BYTE_ARRAY,
key.getTimestamp());
- for (Map.Entry<Text, byte[]> e : resultSets[i].entrySet()) {
+ for (Map.Entry<byte [], byte[]> e : resultSets[i].entrySet()) {
hsk.setColumn(e.getKey());
if (HLogEdit.isDeleted(e.getValue())) {
if (!deletes.contains(hsk)) {
@@ -202,7 +202,7 @@
// If the current scanner is non-null AND has a lower-or-equal
// row label, then its timestamp is bad. We need to advance it.
while ((scanners[i] != null) &&
- (keys[i].getRow().compareTo(chosenRow) <= 0)) {
+ (Bytes.compareTo(keys[i].getRow(), chosenRow) <= 0)) {
resultSets[i].clear();
if (!scanners[i].next(keys[i], resultSets[i])) {
closeScanner(i);
@@ -266,7 +266,7 @@
}
}
- public Iterator<Map.Entry<HStoreKey, SortedMap<Text, byte[]>>> iterator() {
+ public Iterator<Map.Entry<HStoreKey, SortedMap<byte [], byte[]>>> iterator() {
throw new UnsupportedOperationException("Unimplemented serverside. " +
"next(HStoreKey, StortedMap(...) is more efficient");
}
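
The heart of HStoreScanner.next() above is the "lowest-possible key"
selection: among all open scanners take the smallest row, and on a row tie
prefer the newer timestamp, now via Bytes.compareTo instead of
Text.compareTo. A standalone sketch of just that selection step (parallel
arrays stand in for the scanners[]/keys[] state; the comparator is the same
unsigned lexicographic ordering as in the first sketch):

public class LowestKeyDemo {

  static int compareBytes(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) {
        return d;
      }
    }
    return a.length - b.length;
  }

  // Index of the scanner holding the smallest row; on equal rows the
  // newest timestamp wins, mirroring the loop in the patch.
  static int chooseScanner(byte[][] rows, long[] timestamps) {
    int chosen = -1;
    for (int i = 0; i < rows.length; i++) {
      if (rows[i] == null) {
        continue; // closed scanner
      }
      if (chosen == -1
          || compareBytes(rows[i], rows[chosen]) < 0
          || (compareBytes(rows[i], rows[chosen]) == 0
              && timestamps[i] > timestamps[chosen])) {
        chosen = i;
      }
    }
    return chosen;
  }

  public static void main(String[] args) {
    byte[][] rows = { "row2".getBytes(), "row1".getBytes(), "row1".getBytes() };
    long[] ts = { 5L, 3L, 7L };
    System.out.println(chooseScanner(rows, ts)); // 2: same row as 1, newer ts
  }
}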
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java Thu May 15 15:10:47 2008
@@ -22,7 +22,6 @@
import java.io.Closeable;
import java.io.IOException;
import java.util.SortedMap;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HStoreKey;
/**
@@ -50,7 +49,7 @@
* @return true if data was returned
* @throws IOException
*/
- public boolean next(HStoreKey key, SortedMap<Text, byte[]> results)
+ public boolean next(HStoreKey key, SortedMap<byte [], byte[]> results)
throws IOException;
/**
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java Thu May 15 15:10:47 2008
@@ -24,14 +24,13 @@
import java.rmi.UnexpectedException;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
+import java.util.TreeSet;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.commons.logging.Log;
@@ -39,8 +38,7 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -200,25 +198,25 @@
* @return Return lowest of a or b or null if both a and b are null
*/
@SuppressWarnings("unchecked")
- private WritableComparable getLowest(final WritableComparable a,
- final WritableComparable b) {
+ private byte [] getLowest(final byte [] a,
+ final byte [] b) {
if (a == null) {
return b;
}
if (b == null) {
return a;
}
- return a.compareTo(b) <= 0? a: b;
+ return Bytes.compareTo(a, b) <= 0? a: b;
}
/**
* @param row Find the row that comes after this one.
* @return Next row or null if none found
*/
- Text getNextRow(final Text row) {
+ byte [] getNextRow(final byte [] row) {
this.lock.readLock().lock();
try {
- return (Text)getLowest(getNextRow(row, this.memcache),
+ return getLowest(getNextRow(row, this.memcache),
getNextRow(row, this.snapshot));
} finally {
this.lock.readLock().unlock();
@@ -231,9 +229,9 @@
* This method synchronizes on passed map while iterating it.
* @return Next row or null if none found.
*/
- private Text getNextRow(final Text row,
+ private byte [] getNextRow(final byte [] row,
final SortedMap<HStoreKey, byte []> map) {
- Text result = null;
+ byte [] result = null;
// Synchronize on the map to make the tailMap call 'safe'.
synchronized (map) {
// Make an HSK with maximum timestamp so we get past most of the current
@@ -243,7 +241,7 @@
// Iterate until we fall into the next row; i.e. move off current row
for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
HStoreKey itKey = es.getKey();
- if (itKey.getRow().compareTo(row) <= 0) {
+ if (Bytes.compareTo(itKey.getRow(), row) <= 0) {
continue;
}
// Note: Not suppressing deletes or expired cells.
@@ -262,8 +260,8 @@
* @param deletes Map to accumulate deletes found.
* @param results Where to stick row results found.
*/
- void getFull(HStoreKey key, Set<Text> columns, Map<Text, Long> deletes,
- Map<Text, Cell> results) {
+ void getFull(HStoreKey key, Set<byte []> columns, Map<byte [], Long> deletes,
+ Map<byte [], Cell> results) {
this.lock.readLock().lock();
try {
// The synchronizations here are because internalGet iterates
@@ -279,34 +277,31 @@
}
private void internalGetFull(SortedMap<HStoreKey, byte[]> map, HStoreKey key,
- Set<Text> columns, Map<Text, Long> deletes, Map<Text, Cell> results) {
-
+ Set<byte []> columns, Map<byte [], Long> deletes,
+ Map<byte [], Cell> results) {
if (map.isEmpty() || key == null) {
return;
}
-
List<HStoreKey> victims = new ArrayList<HStoreKey>();
SortedMap<HStoreKey, byte[]> tailMap = map.tailMap(key);
long now = System.currentTimeMillis();
for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
HStoreKey itKey = es.getKey();
- Text itCol = itKey.getColumn();
+ byte [] itCol = itKey.getColumn();
if (results.get(itCol) == null && key.matchesWithoutColumn(itKey)) {
- byte [] val = tailMap.get(itKey);
-
if (columns == null || columns.contains(itKey.getColumn())) {
+ byte [] val = tailMap.get(itKey);
if (HLogEdit.isDeleted(val)) {
if (!deletes.containsKey(itCol)
|| deletes.get(itCol).longValue() < itKey.getTimestamp()) {
- deletes.put(new Text(itCol), Long.valueOf(itKey.getTimestamp()));
+ deletes.put(itCol, Long.valueOf(itKey.getTimestamp()));
}
} else if (!(deletes.containsKey(itCol)
&& deletes.get(itCol).longValue() >= itKey.getTimestamp())) {
// Skip expired cells
if (ttl == HConstants.FOREVER ||
now < itKey.getTimestamp() + ttl) {
- results.put(new Text(itCol),
- new Cell(val, itKey.getTimestamp()));
+ results.put(itCol, new Cell(val, itKey.getTimestamp()));
} else {
victims.add(itKey);
if (LOG.isDebugEnabled()) {
@@ -315,7 +310,7 @@
}
}
}
- } else if (key.getRow().compareTo(itKey.getRow()) < 0) {
+ } else if (Bytes.compareTo(key.getRow(), itKey.getRow()) < 0) {
break;
}
}
@@ -329,7 +324,7 @@
* @param candidateKeys Map of candidate keys (Accumulation over lots of
* lookup over stores and memcaches)
*/
- void getRowKeyAtOrBefore(final Text row,
+ void getRowKeyAtOrBefore(final byte [] row,
SortedMap<HStoreKey, Long> candidateKeys) {
this.lock.readLock().lock();
try {
@@ -345,14 +340,12 @@
}
private void internalGetRowKeyAtOrBefore(SortedMap<HStoreKey, byte []> map,
- Text key, SortedMap<HStoreKey, Long> candidateKeys) {
-
+ byte [] key, SortedMap<HStoreKey, Long> candidateKeys) {
HStoreKey strippedKey = null;
// we want the earliest possible to start searching from
HStoreKey search_key = candidateKeys.isEmpty() ?
new HStoreKey(key) : new HStoreKey(candidateKeys.firstKey().getRow());
-
Iterator<HStoreKey> key_iterator = null;
HStoreKey found_key = null;
ArrayList<HStoreKey> victims = new ArrayList<HStoreKey>();
@@ -363,14 +356,15 @@
// if there are items in the tail map, there's either a direct match to
// the search key, or a range of values between the first candidate key
// and the ultimate search key (or the end of the cache)
- if (!tailMap.isEmpty() && tailMap.firstKey().getRow().compareTo(key) <= 0) {
+ if (!tailMap.isEmpty() &&
+ Bytes.compareTo(tailMap.firstKey().getRow(), key) <= 0) {
key_iterator = tailMap.keySet().iterator();
// keep looking at cells as long as they are no greater than the
// ultimate search key and there are still records left in the map.
do {
found_key = key_iterator.next();
- if (found_key.getRow().compareTo(key) <= 0) {
+ if (Bytes.compareTo(found_key.getRow(), key) <= 0) {
strippedKey = stripTimestamp(found_key);
if (HLogEdit.isDeleted(tailMap.get(found_key))) {
if (candidateKeys.containsKey(strippedKey)) {
@@ -393,7 +387,7 @@
}
}
}
- } while (found_key.getRow().compareTo(key) <= 0
+ } while (Bytes.compareTo(found_key.getRow(), key) <= 0
&& key_iterator.hasNext());
} else {
// the tail didn't contain any keys that matched our criteria, or was
@@ -412,13 +406,14 @@
HStoreKey[] cells =
headMap.keySet().toArray(new HStoreKey[headMap.keySet().size()]);
- Text lastRowFound = null;
+ byte [] lastRowFound = null;
for(int i = cells.length - 1; i >= 0; i--) {
HStoreKey thisKey = cells[i];
// if the last row we found a candidate key for is different than
// the row of the current candidate, we can stop looking.
- if (lastRowFound != null && !lastRowFound.equals(thisKey.getRow())) {
+ if (lastRowFound != null &&
+ !Bytes.equals(lastRowFound, thisKey.getRow())) {
break;
}
@@ -587,10 +582,10 @@
HStoreKey key = es.getKey();
// if there's no column name, then compare rows and timestamps
- if (origin.getColumn().toString().equals("")) {
+ if (origin.getColumn().length == 0) {
// if the current and origin row don't match, then we can jump
// out of the loop entirely.
- if (!key.getRow().equals(origin.getRow())) {
+ if (!Bytes.equals(key.getRow(), origin.getRow())) {
break;
}
// if the rows match but the timestamp is newer, skip it so we can
@@ -644,7 +639,7 @@
* @return a scanner over the keys in the Memcache
*/
InternalScanner getScanner(long timestamp,
- Text targetCols[], Text firstRow)
+ final byte [][] targetCols, final byte [] firstRow)
throws IOException {
this.lock.readLock().lock();
try {
@@ -660,11 +655,11 @@
//////////////////////////////////////////////////////////////////////////////
private class MemcacheScanner extends HAbstractScanner {
- private Text currentRow;
- private Set<Text> columns = null;
+ private byte [] currentRow;
+ private Set<byte []> columns = null;
- MemcacheScanner(final long timestamp, final Text targetCols[],
- final Text firstRow)
+ MemcacheScanner(final long timestamp, final byte [] targetCols[],
+ final byte [] firstRow)
throws IOException {
// Call to super will create ColumnMatchers and whether this is a regex
// scanner or not. Will also save away timestamp. Also sorts rows.
@@ -675,7 +670,7 @@
// columns.
this.columns = null;
if (!isWildcardScanner()) {
- this.columns = new HashSet<Text>();
+ this.columns = new TreeSet<byte []>(Bytes.BYTES_COMPARATOR);
for (int i = 0; i < targetCols.length; i++) {
this.columns.add(targetCols[i]);
}
@@ -684,15 +679,19 @@
/** {@inheritDoc} */
@Override
- public boolean next(HStoreKey key, SortedMap<Text, byte []> results)
+ public boolean next(HStoreKey key, SortedMap<byte [], byte []> results)
throws IOException {
if (this.scannerClosed) {
return false;
}
- Map<Text, Long> deletes = new HashMap<Text, Long>();
+ // This is a treemap rather than a Hashmap because then I can have a
+ // byte array as key -- because I can independently specify a comparator.
+ Map<byte [], Long> deletes =
+ new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR);
// Catch all row results in here. These results are then filtered to
// ensure they match column name regexes, or if none, added to results.
- Map<Text, Cell> rowResults = new HashMap<Text, Cell>();
+ Map<byte [], Cell> rowResults =
+ new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
if (results.size() > 0) {
results.clear();
}
@@ -708,12 +707,12 @@
key.setVersion(this.timestamp);
getFull(key, isWildcardScanner() ? null : this.columns, deletes,
rowResults);
- for (Map.Entry<Text, Long> e: deletes.entrySet()) {
+ for (Map.Entry<byte [], Long> e: deletes.entrySet()) {
rowResults.put(e.getKey(),
- new Cell(HLogEdit.deleteBytes.get(), e.getValue()));
+ new Cell(HLogEdit.deleteBytes.get(), e.getValue().longValue()));
}
- for (Map.Entry<Text, Cell> e: rowResults.entrySet()) {
- Text column = e.getKey();
+ for (Map.Entry<byte [], Cell> e: rowResults.entrySet()) {
+ byte [] column = e.getKey();
Cell c = e.getValue();
if (isWildcardScanner()) {
// Check the results match. We only check columns, not timestamps.
@@ -736,4 +735,4 @@
}
}
}
-}
+}
\ No newline at end of file
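
Memcache.getNextRow above works by taking a tailMap from the current row and
skipping entries still less than or equal to it, since SortedMap.tailMap is
inclusive of its bound. A reduced sketch over a plain sorted map of rows (the
HStoreKey/maximum-timestamp machinery from the patch is elided):

import java.util.Comparator;
import java.util.SortedMap;
import java.util.TreeMap;

public class NextRowDemo {

  static int compare(byte[] a, byte[] b) {
    int n = Math.min(a.length, b.length);
    for (int i = 0; i < n; i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) {
        return d;
      }
    }
    return a.length - b.length;
  }

  // First row strictly greater than 'row', or null if none. tailMap(row)
  // includes 'row' itself, which is why the loop (like the patch) skips
  // entries that compare <= 0 before returning.
  static byte[] getNextRow(SortedMap<byte[], byte[]> map, byte[] row) {
    for (byte[] candidate : map.tailMap(row).keySet()) {
      if (compare(candidate, row) <= 0) {
        continue; // still on (or before) the current row
      }
      return candidate;
    }
    return null;
  }

  public static void main(String[] args) {
    SortedMap<byte[], byte[]> map = new TreeMap<byte[], byte[]>(
        new Comparator<byte[]>() {
          public int compare(byte[] a, byte[] b) {
            return NextRowDemo.compare(a, b);
          }
        });
    map.put("row1".getBytes(), new byte[0]);
    map.put("row2".getBytes(), new byte[0]);
    System.out.println(new String(getNextRow(map, "row1".getBytes()))); // row2
    System.out.println(getNextRow(map, "row2".getBytes()));             // null
  }
}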
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/RegionUnavailableListener.java Thu May 15 15:10:47 2008
@@ -19,7 +19,6 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import org.apache.hadoop.io.Text;
/**
* Used as a callback mechanism so that an HRegion can notify the HRegionServer
@@ -33,12 +32,12 @@
* outstanding transactions.
* @param regionName
*/
- public void closing(final Text regionName);
+ public void closing(final byte [] regionName);
/**
* <code>regionName</code> is closed and no longer available.
* Listener should clean up any references to <code>regionName</code>
* @param regionName
*/
- public void closed(final Text regionName);
+ public void closed(final byte [] regionName);
}
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java Thu May 15 15:10:47 2008
@@ -27,8 +27,8 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.MapFile;
-import org.apache.hadoop.io.Text;
/**
* A scanner that iterates through HStore files
@@ -50,7 +50,7 @@
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
public StoreFileScanner(final HStore store, final long timestamp,
- final Text[] targetCols, final Text firstRow)
+ final byte [][] targetCols, final byte [] firstRow)
throws IOException {
super(timestamp, targetCols);
this.store = store;
@@ -71,7 +71,7 @@
* @param firstRow
* @throws IOException
*/
- private void openReaders(final Text firstRow) throws IOException {
+ private void openReaders(final byte [] firstRow) throws IOException {
if (this.readers != null) {
for (int i = 0; i < this.readers.length; i++) {
this.readers[i].close();
@@ -92,7 +92,7 @@
// Advance the readers to the first pos.
for (i = 0; i < readers.length; i++) {
keys[i] = new HStoreKey();
- if (firstRow.getLength() != 0) {
+ if (firstRow != null && firstRow.length != 0) {
if (findFirstRow(i, firstRow)) {
continue;
}
@@ -130,7 +130,7 @@
* @see org.apache.hadoop.hbase.regionserver.InternalScanner#next(org.apache.hadoop.hbase.HStoreKey, java.util.SortedMap)
*/
@Override
- public boolean next(HStoreKey key, SortedMap<Text, byte []> results)
+ public boolean next(HStoreKey key, SortedMap<byte [], byte []> results)
throws IOException {
if (this.scannerClosed) {
return false;
@@ -145,12 +145,11 @@
if (viableRow.getRow() != null) {
key.setRow(viableRow.getRow());
key.setVersion(viableRow.getTimestamp());
- key.setColumn(new Text(""));
for (int i = 0; i < keys.length; i++) {
// Fetch the data
while ((keys[i] != null)
- && (keys[i].getRow().compareTo(viableRow.getRow()) == 0)) {
+ && (Bytes.compareTo(keys[i].getRow(), viableRow.getRow()) == 0)) {
// If we are doing a wild card match or there are multiple matchers
// per column, we need to scan all the older versions of this row
@@ -164,7 +163,7 @@
if(columnMatch(i)) {
// We only want the first result for any specific family member
if(!results.containsKey(keys[i].getColumn())) {
- results.put(new Text(keys[i].getColumn()), vals[i]);
+ results.put(keys[i].getColumn(), vals[i]);
insertedItem = true;
}
}
@@ -177,7 +176,7 @@
// Advance the current scanner beyond the chosen row, to
// a valid timestamp, so we're ready next time.
while ((keys[i] != null)
- && ((keys[i].getRow().compareTo(viableRow.getRow()) <= 0)
+ && ((Bytes.compareTo(keys[i].getRow(), viableRow.getRow()) <= 0)
|| (keys[i].getTimestamp() > this.timestamp)
|| (! columnMatch(i)))) {
getNext(i);
@@ -192,19 +191,19 @@
// Data structure to hold next, viable row (and timestamp).
class ViableRow {
- private final Text row;
+ private final byte [] row;
private final long ts;
- ViableRow(final Text r, final long t) {
+ ViableRow(final byte [] r, final long t) {
this.row = r;
this.ts = t;
}
- public Text getRow() {
+ byte [] getRow() {
return this.row;
}
- public long getTimestamp() {
+ long getTimestamp() {
return this.ts;
}
}
@@ -215,7 +214,7 @@
*/
private ViableRow getNextViableRow() throws IOException {
// Find the next viable row label (and timestamp).
- Text viableRow = null;
+ byte [] viableRow = null;
long viableTimestamp = -1;
long now = System.currentTimeMillis();
long ttl = store.ttl;
@@ -224,11 +223,11 @@
&& (columnMatch(i))
&& (keys[i].getTimestamp() <= this.timestamp)
&& ((viableRow == null)
- || (keys[i].getRow().compareTo(viableRow) < 0)
- || ((keys[i].getRow().compareTo(viableRow) == 0)
+ || (Bytes.compareTo(keys[i].getRow(), viableRow) < 0)
+ || ((Bytes.compareTo(keys[i].getRow(), viableRow) == 0)
&& (keys[i].getTimestamp() > viableTimestamp)))) {
if (ttl == HConstants.FOREVER || now < keys[i].getTimestamp() + ttl) {
- viableRow = new Text(keys[i].getRow());
+ viableRow = keys[i].getRow();
viableTimestamp = keys[i].getTimestamp();
} else {
if (LOG.isDebugEnabled()) {
@@ -248,7 +247,7 @@
* @param firstRow seek to this row
* @return true if this is the first row or if the row was not found
*/
- boolean findFirstRow(int i, Text firstRow) throws IOException {
+ boolean findFirstRow(int i, final byte [] firstRow) throws IOException {
ImmutableBytesWritable ibw = new ImmutableBytesWritable();
HStoreKey firstKey
= (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), ibw);
@@ -350,7 +349,8 @@
// up so future call to next will start here.
ViableRow viableRow = getNextViableRow();
openReaders(viableRow.getRow());
- LOG.debug("Replaced Scanner Readers at row " + viableRow.getRow());
+ LOG.debug("Replaced Scanner Readers at row " +
+ Bytes.toString(viableRow.getRow()));
} finally {
this.lock.writeLock().unlock();
}
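
StoreFileScanner's getNextViableRow, like the flush and compaction paths
earlier in this commit, guards every emitted cell with the same liveness
predicate: ttl == HConstants.FOREVER || now < timestamp + ttl. A tiny sketch
of that predicate in isolation (the FOREVER sentinel value used here is an
assumption for the sketch; the patch only references the constant by name):

public class TtlDemo {

  // Sentinel meaning "cells never expire". Assumed value; stands in for
  // HConstants.FOREVER, which this patch does not show.
  static final long FOREVER = -1L;

  // The check every scanner above applies before returning a cell.
  static boolean isLive(long now, long timestamp, long ttl) {
    return ttl == FOREVER || now < timestamp + ttl;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    System.out.println(isLive(now, now - 5000L, 10000L));   // true: within ttl
    System.out.println(isLive(now, now - 20000L, 10000L));  // false: expired
    System.out.println(isLive(now, now - 20000L, FOREVER)); // true: no expiry
  }
}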
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java Thu May 15 15:10:47 2008
@@ -32,13 +32,12 @@
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.io.Text;
-import org.mortbay.servlet.MultiPartResponse;
import org.znerd.xmlenc.LineBreak;
import org.znerd.xmlenc.XMLOutputter;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.util.Bytes;
/**
* GenericHandler contains some basic common stuff that all the individual
@@ -228,13 +227,12 @@
* @throws IOException
*/
protected void outputColumnsXml(final XMLOutputter outputter,
- final Map<Text, Cell> m)
+ final Map<byte [], Cell> m)
throws IllegalStateException, IllegalArgumentException, IOException {
- for (Map.Entry<Text, Cell> e: m.entrySet()) {
+ for (Map.Entry<byte [], Cell> e: m.entrySet()) {
outputter.startTag(COLUMN);
doElement(outputter, "name",
- org.apache.hadoop.hbase.util.Base64.encodeBytes(
- e.getKey().getBytes()));
+ org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getKey()));
// We don't know String from binary data so we always base64 encode.
doElement(outputter, "value",
org.apache.hadoop.hbase.util.Base64.encodeBytes(e.getValue().getValue()));
@@ -259,6 +257,6 @@
* Get an HTable instance by it's table name.
*/
protected HTable getTable(final String tableName) throws IOException {
- return new HTable(this.conf, new Text(tableName));
+ return new HTable(this.conf, Bytes.toBytes(tableName));
}
}
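
With Text gone from GenericHandler, outputColumnsXml base64-encodes the raw
byte [] column name directly instead of calling getBytes() on a Text first. A
sketch of that output step, substituting the JDK's java.util.Base64 for the
org.apache.hadoop.hbase.util.Base64 helper the patch uses (an assumption made
only so the sketch is self-contained):

import java.util.Base64;
import java.util.Comparator;
import java.util.Map;
import java.util.TreeMap;

public class ColumnXmlDemo {

  public static void main(String[] args) {
    Map<byte[], byte[]> row = new TreeMap<byte[], byte[]>(
        new Comparator<byte[]>() {
          public int compare(byte[] a, byte[] b) {
            int n = Math.min(a.length, b.length);
            for (int i = 0; i < n; i++) {
              int d = (a[i] & 0xff) - (b[i] & 0xff);
              if (d != 0) {
                return d;
              }
            }
            return a.length - b.length;
          }
        });
    row.put("info:name".getBytes(), "value1".getBytes());

    Base64.Encoder enc = Base64.getEncoder();
    for (Map.Entry<byte[], byte[]> e : row.entrySet()) {
      // Column names are raw bytes now; since we can't tell printable
      // strings from binary data, always base64 encode -- the same
      // reasoning the patch's comment gives for values.
      System.out.println("<column><name>" + enc.encodeToString(e.getKey())
          + "</name><value>" + enc.encodeToString(e.getValue())
          + "</value></column>");
    }
  }
}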
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java Thu May 15 15:10:47 2008
@@ -35,6 +35,7 @@
import org.apache.hadoop.hbase.client.Scanner;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
@@ -178,7 +179,7 @@
// write the row key
doElement(outputter, "name",
- org.apache.hadoop.hbase.util.Base64.encodeBytes(rowResult.getRow().getBytes()));
+ org.apache.hadoop.hbase.util.Base64.encodeBytes(rowResult.getRow()));
outputColumnsXml(outputter, rowResult);
outputter.endTag();
@@ -244,14 +245,14 @@
// get the list of columns we're supposed to interact with
String[] raw_columns = request.getParameterValues(COLUMN);
- Text [] columns = null;
+ byte [][] columns = null;
if (raw_columns != null) {
- columns = new Text [raw_columns.length];
+ columns = new byte [raw_columns.length][];
for (int i = 0; i < raw_columns.length; i++) {
// I think this decoding is redundant.
columns[i] =
- new Text(URLDecoder.decode(raw_columns[i], HConstants.UTF8_ENCODING));
+ Bytes.toBytes(URLDecoder.decode(raw_columns[i], HConstants.UTF8_ENCODING));
}
} else {
// TODO: Need to put into the scanner all of the table's column
@@ -264,14 +265,14 @@
String raw_ts = request.getParameter(TIMESTAMP);
// TODO: Are these decodings redundant?
- Text startRow = request.getParameter(START_ROW) == null?
+ byte [] startRow = request.getParameter(START_ROW) == null?
HConstants.EMPTY_START_ROW:
- new Text(URLDecoder.decode(request.getParameter(START_ROW),
+ Bytes.toBytes(URLDecoder.decode(request.getParameter(START_ROW),
HConstants.UTF8_ENCODING));
// Empty start row is same value as empty end row.
- Text endRow = request.getParameter(END_ROW) == null?
+ byte [] endRow = request.getParameter(END_ROW) == null?
HConstants.EMPTY_START_ROW:
- new Text(URLDecoder.decode(request.getParameter(END_ROW),
+ Bytes.toBytes(URLDecoder.decode(request.getParameter(END_ROW),
HConstants.UTF8_ENCODING));
Scanner scanner = (request.getParameter(END_ROW) == null)?
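
ScannerHandler now converts URL-decoded request parameters straight to
byte [] rows via Bytes.toBytes, defaulting to the empty start row when a
parameter is absent. A minimal JDK-only sketch of that conversion
(String.getBytes("UTF-8") approximating Bytes.toBytes; EMPTY_START_ROW is a
stand-in for the HConstants value):

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;

public class RowParamDemo {

  static final byte[] EMPTY_START_ROW = new byte[0]; // stand-in for HConstants

  // A null parameter means "scan from the beginning", mirroring the patch.
  static byte[] toRow(String rawParam) throws UnsupportedEncodingException {
    if (rawParam == null) {
      return EMPTY_START_ROW;
    }
    return URLDecoder.decode(rawParam, "UTF-8").getBytes("UTF-8");
  }

  public static void main(String[] args) throws Exception {
    System.out.println(toRow("row%2F1").length); // "row/1" -> 5 bytes
    System.out.println(toRow(null).length);      // 0: empty start row
  }
}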
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java Thu May 15 15:10:47 2008
@@ -22,10 +22,11 @@
import java.io.IOException;
import java.io.PrintWriter;
import java.net.URLDecoder;
-import java.util.HashMap;
+import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
+import java.util.TreeMap;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
@@ -33,17 +34,16 @@
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.BatchUpdate;
-
-import org.mortbay.servlet.MultiPartResponse;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.Text;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
@@ -135,9 +135,9 @@
// They want full row returned.
// Presumption is that this.table has already been focused on target table.
- Map<Text, Cell> result = timestampStr == null ?
- table.getRow(new Text(row))
- : table.getRow(new Text(row), Long.parseLong(timestampStr));
+ Map<byte [], Cell> result = timestampStr == null ?
+ table.getRow(Bytes.toBytes(row))
+ : table.getRow(Bytes.toBytes(row), Long.parseLong(timestampStr));
if (result == null || result.size() == 0) {
doNotFound(response, "Row not found!");
@@ -153,7 +153,7 @@
}
}
} else {
- Map<Text, Cell> prefiltered_result = table.getRow(new Text(row));
+ Map<byte [], Cell> prefiltered_result = table.getRow(Bytes.toBytes(row));
if (prefiltered_result == null || prefiltered_result.size() == 0) {
doNotFound(response, "Row not found!");
@@ -166,16 +166,14 @@
}
// output map that will contain the filtered results
- Map<Text, Cell> m = new HashMap<Text, Cell>();
+ Map<byte [], Cell> m =
+ new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
// get an array of all the columns retrieved
- Text[] columns_retrieved =
- prefiltered_result.keySet().toArray(
- new Text[prefiltered_result.keySet().size()]);
+ Set<byte []> columns_retrieved = prefiltered_result.keySet();
// copy over those cells with requested column names
- for(int i = 0; i < columns_retrieved.length; i++){
- Text current_column = (Text)columns_retrieved[i];
- if(requested_columns_set.contains(current_column.toString())){
+ for(byte [] current_column: columns_retrieved) {
+ if(requested_columns_set.contains(Bytes.toString(current_column))){
m.put(current_column, prefiltered_result.get(current_column));
}
@@ -201,7 +199,7 @@
* @throws IOException
*/
private void outputRowXml(final HttpServletResponse response,
- final Map<Text, Cell> result)
+ final Map<byte [], Cell> result)
throws IOException {
setResponseHeader(response, result.size() > 0? 200: 204,
ContentType.XML.toString());
@@ -349,7 +347,7 @@
final HttpServletResponse response)
throws IOException {
// Presumption is that this.table has already been focused on target table.
- Text [] startKeys = table.getStartKeys();
+ byte [][] startKeys = table.getStartKeys();
// Presumption is that this.table has already been set against target table
switch (ContentType.getContentType(request.getHeader(ACCEPT))) {
case XML:
@@ -410,18 +408,16 @@
outputter.startTag("table");
doElement(outputter, "name", descriptor.getName().toString());
outputter.startTag("columnfamilies");
- for (Map.Entry<Text, HColumnDescriptor> e:
- descriptor.getFamilies().entrySet()) {
+ for (HColumnDescriptor e: descriptor.getFamilies()) {
outputter.startTag("columnfamily");
- doElement(outputter, "name", e.getKey().toString());
- HColumnDescriptor hcd = e.getValue();
- doElement(outputter, "compression", hcd.getCompression().toString());
+ doElement(outputter, "name", Bytes.toString(e.getName()));
+ doElement(outputter, "compression", e.getCompression().toString());
doElement(outputter, "bloomfilter",
- hcd.getBloomFilter() == null? "NONE": hcd.getBloomFilter().toString());
+ e.getBloomFilter() == null? "NONE": e.getBloomFilter().toString());
doElement(outputter, "max-versions",
- Integer.toString(hcd.getMaxVersions()));
+ Integer.toString(e.getMaxVersions()));
doElement(outputter, "maximum-cell-size",
- Integer.toString(hcd.getMaxValueLength()));
+ Integer.toString(e.getMaxValueLength()));
outputter.endTag();
}
outputter.endTag();
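
The switch from HashMap to TreeMap with Bytes.BYTES_COMPARATOR in the hunks above is not cosmetic: byte [] inherits identity-based hashCode() and equals() from Object, so two arrays with equal contents are different HashMap keys. A small self-contained demonstration:

  import java.util.HashMap;
  import java.util.Map;
  import java.util.TreeMap;

  import org.apache.hadoop.hbase.util.Bytes;

  public class ByteArrayKeySketch {
    public static void main(String[] args) {
      // Identity semantics: a fresh array with the same bytes is a miss.
      Map<byte[], String> hashed = new HashMap<byte[], String>();
      hashed.put(Bytes.toBytes("info:name"), "x");
      System.out.println(hashed.containsKey(Bytes.toBytes("info:name"))); // false

      // Content semantics: BYTES_COMPARATOR orders and matches by contents.
      Map<byte[], String> sorted =
        new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
      sorted.put(Bytes.toBytes("info:name"), "x");
      System.out.println(sorted.containsKey(Bytes.toBytes("info:name"))); // true
    }
  }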
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java Thu May 15 15:10:47 2008
@@ -22,7 +22,6 @@
import java.nio.charset.MalformedInputException;
import java.util.AbstractMap;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
@@ -30,13 +29,17 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Scanner;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.thrift.generated.AlreadyExists;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
@@ -46,12 +49,8 @@
import org.apache.hadoop.hbase.thrift.generated.NotFound;
import org.apache.hadoop.hbase.thrift.generated.RegionDescriptor;
import org.apache.hadoop.hbase.thrift.generated.ScanEntry;
+import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Scanner;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.io.BatchUpdate;
import com.facebook.thrift.TException;
import com.facebook.thrift.protocol.TBinaryProtocol;
@@ -149,13 +148,13 @@
* @throws IllegalArgument
* @throws IOError
*/
- Text getText(byte[] buf) throws IOError {
+ byte [] getText(byte[] buf) throws IOError {
try {
Text.validateUTF8(buf);
} catch (MalformedInputException e) {
throw new IOError("invalid UTF-8 encoding in row or column name");
}
- return new Text(buf);
+ return buf;
}
//
@@ -183,7 +182,7 @@
LOG.debug("getTableRegions: " + new String(tableName));
try {
HTable table = getTable(tableName);
- Text[] startKeys = table.getStartKeys();
+ byte [][] startKeys = table.getStartKeys();
ArrayList<RegionDescriptor> regions = new ArrayList<RegionDescriptor>();
for (int i = 0; i < startKeys.length; i++) {
RegionDescriptor region = new RegionDescriptor();
@@ -276,12 +275,13 @@
}
try {
HTable table = getTable(tableName);
- Map<Text, Cell> values =
+ Map<byte [], Cell> values =
table.getRow(getText(row), timestamp);
- // copy the map from type <Text, Cell> to <byte[], byte[]>
+ // copy the map from type <byte [], Cell> to <byte[], byte[]>
- HashMap<byte[], byte[]> returnValues = new HashMap<byte[], byte[]>();
- for (Entry<Text, Cell> e : values.entrySet()) {
- returnValues.put(e.getKey().getBytes(), e.getValue().getValue());
+ TreeMap<byte[], byte[]> returnValues =
+ new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+ for (Entry<byte [], Cell> e : values.entrySet()) {
+ returnValues.put(e.getKey(), e.getValue().getValue());
}
return returnValues;
} catch (IOException e) {
@@ -353,11 +353,11 @@
LOG.debug("createTable: table=" + new String(tableName));
}
try {
- Text tableStr = getText(tableName);
+ byte [] tableStr = getText(tableName);
if (admin.tableExists(tableStr)) {
throw new AlreadyExists("table name already in use");
}
- HTableDescriptor desc = new HTableDescriptor(tableStr.toString());
+ HTableDescriptor desc = new HTableDescriptor(tableStr);
for (ColumnDescriptor col : columnFamilies) {
HColumnDescriptor colDesc = ThriftUtilities.colDescFromThrift(col);
desc.addFamily(colDesc);
@@ -378,7 +378,7 @@
LOG.debug("deleteTable: table=" + new String(tableName));
}
try {
- Text tableStr = getText(tableName);
+ byte [] tableStr = getText(tableName);
if (!admin.tableExists(tableStr)) {
throw new NotFound();
}
@@ -460,11 +460,11 @@
}
ScanEntry retval = new ScanEntry();
- retval.row = results.getRow().getBytes();
- retval.columns = new HashMap<byte[], byte[]>(results.size());
+ retval.row = results.getRow();
+ retval.columns = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
- for (Map.Entry<Text, Cell> e : results.entrySet()) {
- retval.columns.put(e.getKey().getBytes(), e.getValue().getValue());
+ for (Map.Entry<byte [], Cell> e : results.entrySet()) {
+ retval.columns.put(e.getKey(), e.getValue().getValue());
}
return retval;
}
@@ -477,7 +477,7 @@
}
try {
HTable table = getTable(tableName);
- Text[] columnsText = new Text[columns.size()];
+ byte [][] columnsText = new byte[columns.size()][];
for (int i = 0; i < columns.size(); ++i) {
columnsText[i] = getText(columns.get(i));
}
@@ -498,7 +498,7 @@
}
try {
HTable table = getTable(tableName);
- Text[] columnsText = new Text[columns.size()];
+ byte [][] columnsText = new byte[columns.size()][];
for (int i = 0; i < columns.size(); ++i) {
columnsText[i] = getText(columns.get(i));
}
@@ -519,7 +519,7 @@
}
try {
HTable table = getTable(tableName);
- Text[] columnsText = new Text[columns.size()];
+ byte [][] columnsText = new byte[columns.size()][];
for (int i = 0; i < columns.size(); ++i) {
columnsText[i] = getText(columns.get(i));
}
@@ -541,7 +541,7 @@
}
try {
HTable table = getTable(tableName);
- Text[] columnsText = new Text[columns.size()];
+ byte [][] columnsText = new byte[columns.size()][];
for (int i = 0; i < columns.size(); ++i) {
columnsText[i] = getText(columns.get(i));
}
@@ -559,13 +559,14 @@
LOG.debug("getColumnDescriptors: table=" + new String(tableName));
}
try {
- HashMap<byte[], ColumnDescriptor> columns = new HashMap<byte[], ColumnDescriptor>();
+ TreeMap<byte[], ColumnDescriptor> columns =
+ new TreeMap<byte[], ColumnDescriptor>(Bytes.BYTES_COMPARATOR);
HTable table = getTable(tableName);
HTableDescriptor desc = table.getMetadata();
- for (Entry<Text, HColumnDescriptor> e : desc.families().entrySet()) {
- ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e.getValue());
+ for (HColumnDescriptor e : desc.getFamilies()) {
+ ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e);
columns.put(col.name, col);
}
return columns;
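
The reworked getText() keeps the UTF-8 validation but no longer copies into a Text. A sketch of that idea under a hypothetical name (checkUtf8); Text.validateUTF8 is the same call used above:

  import java.nio.charset.MalformedInputException;

  import org.apache.hadoop.io.Text;

  public class Utf8CheckSketch {
    static byte[] checkUtf8(byte[] buf) throws MalformedInputException {
      // Validate only; the caller keeps working with the original array.
      Text.validateUTF8(buf);
      return buf;
    }

    public static void main(String[] args) throws MalformedInputException {
      byte[] row = { 'r', 'o', 'w', '1' };
      System.out.println(checkUtf8(row).length); // 4, no copy made
    }
  }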
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java Thu May 15 15:10:47 2008
@@ -23,7 +23,6 @@
import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor;
import org.apache.hadoop.hbase.thrift.generated.IllegalArgument;
-import org.apache.hadoop.io.Text;
public class ThriftUtilities {
@@ -57,7 +56,7 @@
if (in.name == null || in.name.length <= 0) {
throw new IllegalArgument("column name is empty");
}
- HColumnDescriptor col = new HColumnDescriptor(new Text(in.name),
+ HColumnDescriptor col = new HColumnDescriptor(in.name,
in.maxVersions, comp, in.inMemory, in.blockCacheEnabled,
in.maxValueLength, in.timeToLive, bloom);
return col;
@@ -73,7 +72,7 @@
*/
static public ColumnDescriptor colDescFromHbase(HColumnDescriptor in) {
ColumnDescriptor col = new ColumnDescriptor();
- col.name = in.getName().getBytes();
+ col.name = in.getName();
col.maxVersions = in.getMaxVersions();
col.compression = in.getCompression().toString();
col.inMemory = in.isInMemory();
@@ -88,4 +87,4 @@
return col;
}
-}
+}
\ No newline at end of file
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java?rev=656868&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java Thu May 15 15:10:47 2008
@@ -0,0 +1,269 @@
+package org.apache.hadoop.hbase.util;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+import java.util.Comparator;
+
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.WritableComparator;
+
+/**
+ * Utility class that handles byte arrays: conversions to and from Strings
+ * and primitives, lexicographic comparison, hashing and concatenation.
+ */
+public class Bytes {
+ /**
+ * Size of long in bytes
+ */
+ public static final int SIZEOF_LONG = Long.SIZE/Byte.SIZE;
+
+ /**
+ * Size of int in bytes
+ */
+ public static final int SIZEOF_INT = Integer.SIZE/Byte.SIZE;
+
+ /**
+ * Pass this to TreeMaps where byte [] are keys.
+ */
+ public static Comparator<byte []> BYTES_COMPARATOR =
+ new Comparator<byte []>() {
+ public int compare(byte [] left, byte [] right) {
+ return compareTo(left, right);
+ }
+ };
+
+ /**
+ * @param in Input to read from.
+ * @return byte array read off <code>in</code>
+ * @throws IOException
+ */
+ public static byte [] readByteArray(final DataInput in)
+ throws IOException {
+ byte [] result = new byte[in.readInt()];
+ in.readFully(result, 0, result.length);
+ return result;
+ }
+
+ /**
+ * @param out
+ * @param b
+ * @throws IOException
+ */
+ public static void writeByteArray(final DataOutput out, final byte [] b)
+ throws IOException {
+ out.writeInt(b.length);
+ out.write(b, 0, b.length);
+ }
+
+ /**
+ * @param b Presumed UTF-8 encoded byte array.
+ * @return String made from <code>b</code>
+ */
+ public static String toString(final byte [] b) {
+ String result = null;
+ try {
+ result = new String(b, HConstants.UTF8_ENCODING);
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
+ return result;
+ }
+
+
+ /**
+ * Converts a string to a UTF-8 byte array.
+ * @param s
+ * @return the byte array
+ */
+ public static byte[] toBytes(String s) {
+ if (s == null) {
+ throw new IllegalArgumentException("string cannot be null");
+ }
+ byte [] result = null;
+ try {
+ result = s.getBytes(HConstants.UTF8_ENCODING);
+ } catch (UnsupportedEncodingException e) {
+ e.printStackTrace();
+ }
+ return result;
+ }
+
+ /**
+ * Convert a long value to a byte array
+ * @param val
+ * @return the byte array
+ */
+ public static byte[] toBytes(final long val) {
+ ByteBuffer bb = ByteBuffer.allocate(SIZEOF_LONG);
+ bb.putLong(val);
+ return bb.array();
+ }
+
+ /**
+ * Converts a byte array to a long value
+ * @param bytes
+ * @return the long value
+ */
+ public static long toLong(byte[] bytes) {
+ if (bytes == null || bytes.length == 0) {
+ return -1L;
+ }
+ return ByteBuffer.wrap(bytes).getLong();
+ }
+
+ /**
+ * Convert an int value to a byte array
+ * @param val
+ * @return the byte array
+ */
+ public static byte[] toBytes(final int val) {
+ ByteBuffer bb = ByteBuffer.allocate(SIZEOF_INT);
+ bb.putInt(val);
+ return bb.array();
+ }
+
+ /**
+ * Converts a byte array to an int value
+ * @param bytes
+ * @return the int value
+ */
+ public static int toInt(byte[] bytes) {
+ if (bytes == null || bytes.length == 0) {
+ return -1;
+ }
+ return ByteBuffer.wrap(bytes).getInt();
+ }
+
+ /**
+ * @param left
+ * @param right
+ * @return 0 if equal, < 0 if left is less than right, > 0 if left is greater than right.
+ */
+ public static int compareTo(final byte [] left, final byte [] right) {
+ return compareTo(left, 0, left.length, right, 0, right.length);
+ }
+
+ /**
+ * @param left
+ * @param right
+ * @param leftOffset Where to start comparing in the left buffer
+ * @param rightOffset Where to start comparing in the right buffer
+ * @param leftLength How much to compare from the left buffer
+ * @param rightLength How much to compare from the right buffer
+ * @return 0 if equal, < 0 if left is less than right, > 0 if left is greater than right.
+ */
+ public static int compareTo(final byte [] left, final int leftOffset,
+ final int leftLength, final byte [] right, final int rightOffset,
+ final int rightLength) {
+ return WritableComparator.compareBytes(left,leftOffset, leftLength,
+ right, rightOffset, rightLength);
+ }
+
+ /**
+ * @param left
+ * @param right
+ * @return True if equal
+ */
+ public static boolean equals(final byte [] left, final byte [] right) {
+ if (left == null || right == null) {
+ return left == right;
+ }
+ return left.length == right.length && compareTo(left, right) == 0;
+ }
+
+ /**
+ * @param b
+ * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the
+ * passed in array. This method is what {@link Text} and
+ * {@link ImmutableBytesWritable} use when calculating hash codes.
+ */
+ public static int hashCode(final byte [] b) {
+ return hashCode(b, b.length);
+ }
+
+ /**
+ * @param b
+ * @param length Number of bytes of <code>b</code> to hash.
+ * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the
+ * passed in array. This method is what {@link Text} and
+ * {@link ImmutableBytesWritable} use when calculating hash codes.
+ */
+ public static int hashCode(final byte [] b, final int length) {
+ return WritableComparator.hashBytes(b, length);
+ }
+
+ /**
+ * @param b
+ * @return A hash of <code>b</code> as an Integer that can be used as a key in
+ * Maps.
+ */
+ public static Integer mapKey(final byte [] b) {
+ return Integer.valueOf(hashCode(b));
+ }
+
+ /**
+ * @param b
+ * @param length Number of bytes of <code>b</code> to use.
+ * @return A hash of <code>b</code> as an Integer that can be used as a key in
+ * Maps.
+ */
+ public static Integer mapKey(final byte [] b, final int length) {
+ return Integer.valueOf(hashCode(b, length));
+ }
+
+ /**
+ * @param a
+ * @param b
+ * @return New array with <code>a</code> in its lower half and <code>b</code> in its upper half.
+ */
+ public static byte [] add(final byte [] a, final byte [] b) {
+ return add(a, b, HConstants.EMPTY_BYTE_ARRAY);
+ }
+
+ /**
+ * @param a
+ * @param b
+ * @param c
+ * @return New array made from a, b and c
+ */
+ public static byte [] add(final byte [] a, final byte [] b, final byte [] c) {
+ byte [] result = new byte[a.length + b.length + c.length];
+ System.arraycopy(a, 0, result, 0, a.length);
+ System.arraycopy(b, 0, result, a.length, b.length);
+ System.arraycopy(c, 0, result, a.length + b.length, c.length);
+ return result;
+ }
+
+
+ /**
+ * @param t
+ * @return Array of byte arrays made from the passed array of Text
+ */
+ public static byte [][] toByteArrays(final Text [] t) {
+ byte [][] result = new byte[t.length][];
+ for (int i = 0; i < t.length; i++) {
+ result[i] = t[i].getBytes();
+ }
+ return result;
+ }
+
+ /**
+ * @param column
+ * @return An array of byte arrays whose first and only entry is
+ * <code>column</code>
+ */
+ public static byte [][] toByteArrays(final String column) {
+ return toByteArrays(toBytes(column));
+ }
+
+ /**
+ * @param column
+ * @return An array of byte arrays whose first and only entry is
+ * <code>column</code>
+ */
+ public static byte [][] toByteArrays(final byte [] column) {
+ byte [][] result = new byte[1][];
+ result[0] = column;
+ return result;
+ }
+}
\ No newline at end of file
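
A short tour of the new class, using only methods defined above; the literal values are illustrative:

  import java.util.Arrays;

  import org.apache.hadoop.hbase.util.Bytes;

  public class BytesTour {
    public static void main(String[] args) {
      // String round-trip through UTF-8.
      byte[] col = Bytes.toBytes("columnfamily:qualifier");
      System.out.println(Bytes.toString(col));

      // Fixed-width numeric encodings: 8 bytes for a long, 4 for an int.
      byte[] ts = Bytes.toBytes(1210889447000L);
      System.out.println(Bytes.toLong(ts) + " from " + ts.length + " bytes");

      // Lexicographic comparison and concatenation helpers.
      System.out.println(Bytes.compareTo(Bytes.toBytes("a"), Bytes.toBytes("b")) < 0);
      System.out.println(Arrays.toString(Bytes.add(Bytes.toBytes("a"), Bytes.toBytes("b"))));
    }
  }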