Posted to commits@hbase.apache.org by st...@apache.org on 2009/04/12 12:39:58 UTC
svn commit: r764289 [1/8] - in /hadoop/hbase/trunk: ./
src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/
src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/io/
src/java/org/apache/hadoop/hbase/io/hfile/ s...
Author: stack
Date: Sun Apr 12 10:39:55 2009
New Revision: 764289
URL: http://svn.apache.org/viewvc?rev=764289&view=rev
Log:
HBASE-1234 Change HBase StoreKey format
Added:
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestKeyValue.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestColumnValueFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestInclusiveStopRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestPageRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestPrefixRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRegExpRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterAfterWrite.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterOnMultipleFamilies.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestRowFilterSet.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestStopRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/DisabledTestWhileMatchRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/DisableTestCompaction.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/DisabledTestAtomicIncrement.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/thrift/DisabledTestThriftServer.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestBytes.java
Removed:
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/TestBytes.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestColumnValueFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestInclusiveStopRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestPageRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestPrefixRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRegExpRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterAfterWrite.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterOnMultipleFamilies.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestRowFilterSet.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestStopRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/filter/TestWhileMatchRowFilter.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestAtomicIncrement.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestCompaction.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/thrift/TestThriftServer.java
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Cell.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HalfHFileReader.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/HbaseMapWritable.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/Reference.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/RowResult.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/Store.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/tableindexed/IndexedRegion.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalHLogManager.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegion.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Bytes.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Merge.java
hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TimestampTestBase.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestHFile.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestDeleteAll.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestGet2.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHLog.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHMemcache.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestHRegion.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestSplit.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/TestMergeTool.java
Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Sun Apr 12 10:39:55 2009
@@ -11,6 +11,7 @@
hbase.master) (Nitay Joffe via Stack)
HBASE-1289 Remove "hbase.fully.distributed" option and update docs
(Nitay Joffe via Stack)
+ HBASE-1234 Change HBase StoreKey format
BUG FIXES
HBASE-1140 "ant clean test" fails (Nitay Joffe via Stack)
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java Sun Apr 12 10:39:55 2009
@@ -24,7 +24,6 @@
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Random;
-import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -331,12 +330,10 @@
HConstants.LATEST_TIMESTAMP, null);
try {
- HStoreKey key = new HStoreKey();
- TreeMap<byte [], Cell> results =
- new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
- while(rootScanner.next(key, results)) {
- for(Cell c: results.values()) {
- HRegionInfo info = Writables.getHRegionInfoOrNull(c.getValue());
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ while(rootScanner.next(results)) {
+ for(KeyValue kv: results) {
+ HRegionInfo info = Writables.getHRegionInfoOrNull(kv.getValue());
if (info != null) {
metaRegions.add(info);
}
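
The HMerge hunk above is the migration this commit repeats across the codebase: scanners no longer fill an HStoreKey plus a TreeMap of column to Cell; they fill a flat List of KeyValues. A minimal sketch of the new consumption idiom, using only names visible in the hunk; the wrapper class, method name, and the clear() between rows are illustrative assumptions:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;
    import org.apache.hadoop.hbase.util.Writables;

    public class ScanSketch {
      // Collect HRegionInfos from a catalog scan, one row per next() call.
      static List<HRegionInfo> collectRegions(final InternalScanner scanner)
          throws IOException {
        List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
        List<KeyValue> results = new ArrayList<KeyValue>();
        // next(List<KeyValue>) fills 'results' with a row's cells and returns
        // false when the scan is exhausted.
        while (scanner.next(results)) {
          for (KeyValue kv : results) {
            HRegionInfo info = Writables.getHRegionInfoOrNull(kv.getValue());
            if (info != null) {
              regions.add(info);
            }
          }
          results.clear();
        }
        return regions;
      }
    }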
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionInfo.java Sun Apr 12 10:39:55 2009
@@ -24,6 +24,7 @@
import java.io.IOException;
import java.util.Arrays;
+import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JenkinsHash;
import org.apache.hadoop.io.VersionedWritable;
@@ -465,4 +466,12 @@
this.splitRequest = b;
return old;
}
+
+ /**
+ * @return Comparator to use comparing {@link KeyValue}s.
+ */
+ public KVComparator getComparator() {
+ return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()?
+ KeyValue.META_COMPARATOR: KeyValue.COMPARATOR;
+ }
}
\ No newline at end of file
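
The getComparator() addition above centralizes comparator selection: -ROOT- and .META. row keys embed table names and region ids and so need their own sort order, while user tables use the plain comparator. A hedged sketch of a caller; the sorted-set construction is illustrative, not code from this commit:

    import java.util.TreeSet;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.KeyValue;

    public class ComparatorSketch {
      // KVComparator implements java.util.Comparator<KeyValue>, so a region's
      // comparator can order any sorted collection of its cells.
      static TreeSet<KeyValue> sortedCellsFor(final HRegionInfo info) {
        return new TreeSet<KeyValue>(info.getComparator());
      }
    }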
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HStoreKey.java Sun Apr 12 10:39:55 2009
@@ -36,6 +36,7 @@
/**
* A Key for a stored row.
+ * @deprecated Replaced by {@link KeyValue}.
*/
public class HStoreKey implements WritableComparable<HStoreKey>, HeapSize {
/**
@@ -242,7 +243,7 @@
return equalsTwoRowKeys(getRow(), other.getRow()) &&
getTimestamp() >= other.getTimestamp();
}
-
+
/**
* Compares the row and column family of two keys
*
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Sun Apr 12 10:39:55 2009
@@ -27,6 +27,7 @@
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
+import java.util.TreeMap;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.tableindexed.IndexSpecification;
@@ -99,8 +100,8 @@
private volatile Boolean root = null;
// Key is hash of the family name.
- private final Map<Integer, HColumnDescriptor> families =
- new HashMap<Integer, HColumnDescriptor>();
+ private final Map<byte [], HColumnDescriptor> families =
+ new TreeMap<byte [], HColumnDescriptor>(KeyValue.FAMILY_COMPARATOR);
// Key is indexId
private final Map<String, IndexSpecification> indexes =
@@ -115,7 +116,7 @@
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(name);
for(HColumnDescriptor descriptor : families) {
- this.families.put(Bytes.mapKey(descriptor.getName()), descriptor);
+ this.families.put(descriptor.getName(), descriptor);
}
}
@@ -130,7 +131,7 @@
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(name);
for(HColumnDescriptor descriptor : families) {
- this.families.put(Bytes.mapKey(descriptor.getName()), descriptor);
+ this.families.put(descriptor.getName(), descriptor);
}
for(IndexSpecification index : indexes) {
this.indexes.put(index.getIndexId(), index);
@@ -190,7 +191,7 @@
this.nameAsString = Bytes.toString(this.name);
setMetaFlags(this.name);
for (HColumnDescriptor c: desc.families.values()) {
- this.families.put(Bytes.mapKey(c.getName()), new HColumnDescriptor(c));
+ this.families.put(c.getName(), new HColumnDescriptor(c));
}
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
desc.values.entrySet()) {
@@ -455,7 +456,7 @@
if (family.getName() == null || family.getName().length <= 0) {
throw new NullPointerException("Family name cannot be null or empty");
}
- this.families.put(Bytes.mapKey(family.getName()), family);
+ this.families.put(family.getName(), family);
}
/**
@@ -464,19 +465,9 @@
* @return true if the table contains the specified family name
*/
public boolean hasFamily(final byte [] c) {
- return hasFamily(c, HStoreKey.getFamilyDelimiterIndex(c));
- }
-
- /**
- * Checks to see if this table contains the given column family
- * @param c Family name or column name.
- * @param index Index to column family delimiter
- * @return true if the table contains the specified family name
- */
- public boolean hasFamily(final byte [] c, final int index) {
// If index is -1, then presume we were passed a column family name minus
// the colon delimiter.
- return families.containsKey(Bytes.mapKey(c, index == -1? c.length: index));
+ return families.containsKey(c);
}
/**
@@ -571,7 +562,7 @@
for (int i = 0; i < numFamilies; i++) {
HColumnDescriptor c = new HColumnDescriptor();
c.readFields(in);
- families.put(Bytes.mapKey(c.getName()), c);
+ families.put(c.getName(), c);
}
indexes.clear();
if (version < 4) {
@@ -657,7 +648,7 @@
* passed in column.
*/
public HColumnDescriptor getFamily(final byte [] column) {
- return this.families.get(HStoreKey.getFamilyMapKey(column));
+ return this.families.get(column);
}
/**
@@ -666,7 +657,7 @@
* passed in column.
*/
public HColumnDescriptor removeFamily(final byte [] column) {
- return this.families.remove(HStoreKey.getFamilyMapKey(column));
+ return this.families.remove(column);
}
/**
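
The HTableDescriptor hunks above swap a HashMap keyed on Bytes.mapKey hashes for a TreeMap keyed on the raw family byte []. A TreeMap never consults byte[].equals() or hashCode(); it relies entirely on the supplied comparator, and FAMILY_COMPARATOR compares contents only up to the ':' delimiter, which is why getFamily and removeFamily can now be handed a full column name directly. A small hedged illustration; the class name and values are made up:

    import java.util.Map;
    import java.util.TreeMap;

    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyMapSketch {
      public static void main(final String[] args) {
        Map<byte [], String> families =
          new TreeMap<byte [], String>(KeyValue.FAMILY_COMPARATOR);
        families.put(Bytes.toBytes("info"), "descriptor");
        // A different array instance, even a full 'family:qualifier' column,
        // still hits: the comparator compares contents and stops at the
        // family delimiter.
        System.out.println(families.get(Bytes.toBytes("info:regioninfo")));
      }
    }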
Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java?rev=764289&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/KeyValue.java Sun Apr 12 10:39:55 2009
@@ -0,0 +1,1398 @@
+/**
+ * Copyright 2009 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.RawComparator;
+
+/**
+ * An HBase Key/Value. Instances of this class are immutable. They are not
+ * comparable but Comparators are provided. Comparators change with context,
+ * depending on whether a user table or a catalog table is being compared. It
+ * is important that you use the appropriate comparator, in particular when
+ * comparing rows. There are Comparators for KeyValue instances and for
+ * just the Key portion of a KeyValue, used mostly in
+ * {@link org.apache.hadoop.hbase.io.hfile.HFile}.
+ *
+ * <p>KeyValue wraps a byte array and has offset and length for passed array
+ * at where to start interpreting the content as a KeyValue blob. The KeyValue
+ * blob format inside the byte array is:
+ * <code><keylength> <valuelength> <key> <value></code>
+ * Key is decomposed as:
+ * <code><rowlength> <row> <columnfamilylength> <columnfamily> <columnqualifier> <timestamp> <keytype></code>
+ * Row length maximum is Short.MAX_VALUE, column family length maximum is
+ * Byte.MAX_VALUE, and column qualifier + key length must be < Integer.MAX_VALUE.
+ * The column does not contain the family/qualifier delimiter.
+ *
+ * <p>TODO: Group Key-only comparators and operations into a Key class, just
+ * for neatness' sake, if we can figure out what to call it.
+ */
+public class KeyValue {
+ static final Log LOG = LogFactory.getLog(KeyValue.class);
+
+ /**
+ * Colon character in UTF-8
+ */
+ public static final char COLUMN_FAMILY_DELIMITER = ':';
+
+ /**
+ * Comparator for plain key/values; i.e. non-catalog table key/values.
+ */
+ public static KVComparator COMPARATOR = new KVComparator();
+
+ /**
+ * Comparator for plain key; i.e. non-catalog table key. Works on Key portion
+ * of KeyValue only.
+ */
+ public static KeyComparator KEY_COMPARATOR = new KeyComparator();
+
+ /**
+ * A {@link KVComparator} for <code>.META.</code> catalog table
+ * {@link KeyValue}s.
+ */
+ public static KVComparator META_COMPARATOR = new MetaComparator();
+
+ /**
+ * A {@link KVComparator} for <code>.META.</code> catalog table
+ * {@link KeyValue} keys.
+ */
+ public static KeyComparator META_KEY_COMPARATOR = new MetaKeyComparator();
+
+ /**
+ * A {@link KVComparator} for <code>-ROOT-</code> catalog table
+ * {@link KeyValue}s.
+ */
+ public static KVComparator ROOT_COMPARATOR = new RootComparator();
+
+ /**
+ * A {@link KVComparator} for <code>-ROOT-</code> catalog table
+ * {@link KeyValue} keys.
+ */
+ public static KeyComparator ROOT_KEY_COMPARATOR = new RootKeyComparator();
+
+ /**
+ * Comparator that compares the family portion of columns only.
+ * Use this making NavigableMaps of Stores or when you need to compare
+ * column family portion only of two column names.
+ */
+ public static final RawComparator<byte []> FAMILY_COMPARATOR =
+ new RawComparator<byte []> () {
+ public int compare(byte [] a, int ao, int al, byte [] b, int bo, int bl) {
+ int indexa = KeyValue.getDelimiter(a, ao, al, COLUMN_FAMILY_DELIMITER);
+ if (indexa < 0) {
+ indexa = al;
+ }
+ int indexb = KeyValue.getDelimiter(b, bo, bl, COLUMN_FAMILY_DELIMITER);
+ if (indexb < 0) {
+ indexb = bl;
+ }
+ return Bytes.compareTo(a, ao, indexa, b, bo, indexb);
+ }
+
+ public int compare(byte[] a, byte[] b) {
+ return compare(a, 0, a.length, b, 0, b.length);
+ }
+ };
+
+ // Size of the timestamp and type byte on end of a key -- a long + a byte.
+ private static final int TIMESTAMP_TYPE_SIZE =
+ Bytes.SIZEOF_LONG /* timestamp */ +
+ Bytes.SIZEOF_BYTE /*keytype*/;
+
+ // Size of the length shorts and bytes in key.
+ private static final int KEY_INFRASTRUCTURE_SIZE =
+ Bytes.SIZEOF_SHORT /*rowlength*/ +
+ Bytes.SIZEOF_BYTE /*columnfamilylength*/ +
+ TIMESTAMP_TYPE_SIZE;
+
+ // How far into the key the row starts at. First thing to read is the short
+ // that says how long the row is.
+ private static final int ROW_OFFSET =
+ Bytes.SIZEOF_INT /*keylength*/ +
+ Bytes.SIZEOF_INT /*valuelength*/;
+
+ // Size of the length ints in a KeyValue datastructure.
+ private static final int KEYVALUE_INFRASTRUCTURE_SIZE = ROW_OFFSET;
+
+ /**
+ * Key type.
+ * Has space for other key types to be added later. Cannot rely on
+ * enum ordinals: they change if an item is removed or moved. Use our own codes.
+ */
+ public static enum Type {
+ Put((byte)4),
+ Delete((byte)8),
+ DeleteColumn((byte)16),
+ DeleteFamily((byte)32),
+ // Maximum is used when searching; you look from maximum on down.
+ Maximum((byte)255);
+
+ private final byte code;
+
+ Type(final byte c) {
+ this.code = c;
+ }
+
+ public byte getCode() {
+ return this.code;
+ }
+
+ /**
+ * Cannot rely on enum ordinals: they change if an item is removed or moved.
+ * Use our own codes.
+ * @param b
+ * @return Type associated with passed code.
+ */
+ public static Type codeToType(final byte b) {
+ // This is messy repeating each type here below but no way around it; we
+ // can't use the enum ordinal.
+ if (b == Put.getCode()) {
+ return Put;
+ } else if (b == Delete.getCode()) {
+ return Delete;
+ } else if (b == DeleteColumn.getCode()) {
+ return DeleteColumn;
+ } else if (b == DeleteFamily.getCode()) {
+ return DeleteFamily;
+ } else if (b == Maximum.getCode()) {
+ return Maximum;
+ }
+ throw new RuntimeException("Unknown code " + b);
+ }
+ }
+
+ /**
+ * Lowest possible key.
+ * Makes a Key with highest possible Timestamp, empty row and column. No
+ * key can be equal or lower than this one in memcache or in store file.
+ */
+ public static final KeyValue LOWESTKEY =
+ new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
+
+ private final byte [] bytes;
+ private final int offset;
+ private final int length;
+
+ /**
+ * Creates a KeyValue from the start of the specified byte array.
+ * Presumes <code>bytes</code> content is formatted as a KeyValue blob.
+ * @param bytes byte array
+ */
+ public KeyValue(final byte [] bytes) {
+ this(bytes, 0);
+ }
+
+ /**
+ * Creates a KeyValue from the specified byte array and offset.
+ * Presumes <code>bytes</code> content starting at <code>offset</code> is
+ * formatted as a KeyValue blob.
+ * @param bytes byte array
+ * @param offset offset to start of KeyValue
+ */
+ public KeyValue(final byte [] bytes, final int offset) {
+ this(bytes, offset, getLength(bytes, offset));
+ }
+
+ /**
+ * Creates a KeyValue from the specified byte array, starting at offset, and
+ * for length <code>length</code>.
+ * @param bytes byte array
+ * @param offset offset to start of the KeyValue
+ * @param length length of the KeyValue
+ */
+ public KeyValue(final byte [] bytes, final int offset, final int length) {
+ this.bytes = bytes;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row and timestamp.
+ * @param row
+ * @param timestamp
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final String row, final long timestamp) {
+ this(Bytes.toBytes(row), timestamp);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row and timestamp.
+ * @param row
+ * @param timestamp
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final byte [] row, final long timestamp) {
+ this(row, null, timestamp, Type.Put, null);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row and column.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final String row, final String column) {
+ this(row, column, null);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row and column.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final byte [] row, final byte [] column) {
+ this(row, column, null);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row, column and value.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param value
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final String row, final String column, final byte [] value) {
+ this(Bytes.toBytes(row), Bytes.toBytes(column), value);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row, column and value.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param value
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final byte [] row, final byte [] column, final byte [] value) {
+ this(row, column, HConstants.LATEST_TIMESTAMP, value);
+ }
+
+
+ /**
+ * Constructs a KeyValue for the specified row, column and timestamp.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param ts
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final String row, final String column, final long ts) {
+ this(row, column, ts, null);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row, column and timestamp.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param ts
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final byte [] row, final byte [] column, final long ts) {
+ this(row, column, ts, Type.Put);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row, column, timestamp and value.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param timestamp
+ * @param value
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final String row, final String column,
+ final long timestamp, final byte [] value) {
+ this(Bytes.toBytes(row),
+ column == null? HConstants.EMPTY_BYTE_ARRAY: Bytes.toBytes(column),
+ timestamp, value);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row, column, timestamp and value.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param timestamp
+ * @param value
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final byte [] row, final byte [] column,
+ final long timestamp, final byte [] value) {
+ this(row, column, timestamp, Type.Put, value);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row, column, timestamp, type and value.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param timestamp
+ * @param type
+ * @param value
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final String row, final String column,
+ final long timestamp, final Type type, final byte [] value) {
+ this(Bytes.toBytes(row), Bytes.toBytes(column), timestamp, type,
+ value);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row, column, timestamp and type.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param timestamp
+ * @param type
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final byte [] row, final byte [] column,
+ final long timestamp, final Type type) {
+ this(row, 0, row.length, column, 0, column == null? 0: column.length,
+ timestamp, type, null, 0, -1);
+ }
+
+ /**
+ * Constructs a KeyValue for the specified row, column, timestamp, type and value.
+ * @param row
+ * @param column Column with delimiter between family and qualifier
+ * @param timestamp
+ * @param type
+ * @param value
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final byte [] row, final byte [] column,
+ final long timestamp, final Type type, final byte [] value) {
+ this(row, 0, row.length, column, 0, column == null? 0: column.length,
+ timestamp, type, value, 0, value == null? 0: value.length);
+ }
+
+ /**
+ * Constructs a KeyValue from the specified slices of row, column and value.
+ * @param row
+ * @param roffset
+ * @param rlength
+ * @param column Column with delimiter between family and qualifier
+ * @param coffset Where to start reading the column.
+ * @param clength How long column is (including the family/qualifier delimiter).
+ * @param timestamp
+ * @param type
+ * @param value
+ * @param voffset
+ * @param vlength
+ * @throws IllegalArgumentException
+ */
+ public KeyValue(final byte [] row, final int roffset, final int rlength,
+ final byte [] column, final int coffset, int clength,
+ final long timestamp, final Type type,
+ final byte [] value, final int voffset, int vlength) {
+ this.bytes = createByteArray(row, roffset, rlength, column, coffset,
+ clength, timestamp, type, value, voffset, vlength);
+ this.length = bytes.length;
+ this.offset = 0;
+ }
+
+ /**
+ * Write KeyValue format into a byte array.
+ * @param row
+ * @param roffset
+ * @param rlength
+ * @param column
+ * @param coffset
+ * @param clength
+ * @param timestamp
+ * @param type
+ * @param value
+ * @param voffset
+ * @param vlength
+ * @return A new byte array containing the serialized KeyValue blob.
+ */
+ static byte [] createByteArray(final byte [] row, final int roffset,
+ final int rlength,
+ final byte [] column, final int coffset, int clength,
+ final long timestamp, final Type type,
+ final byte [] value, final int voffset, int vlength) {
+ if (rlength > Short.MAX_VALUE) {
+ throw new IllegalArgumentException("Row > " + Short.MAX_VALUE);
+ }
+ if (row == null) {
+ throw new IllegalArgumentException("Row is null");
+ }
+ // If column is non-null, figure where the delimiter is at.
+ int delimiteroffset = 0;
+ if (column != null && column.length > 0) {
+ delimiteroffset = getFamilyDelimiterIndex(column, coffset, clength);
+ if (delimiteroffset > Byte.MAX_VALUE) {
+ throw new IllegalArgumentException("Family > " + Byte.MAX_VALUE);
+ }
+ }
+ // Value length
+ vlength = value == null? 0: vlength;
+ // Column length - minus delimiter
+ clength = column == null || column.length == 0? 0: clength - 1;
+ long longkeylength = KEY_INFRASTRUCTURE_SIZE + rlength + clength;
+ if (longkeylength > Integer.MAX_VALUE) {
+ throw new IllegalArgumentException("keylength " + longkeylength + " > " +
+ Integer.MAX_VALUE);
+ }
+ int keylength = (int)longkeylength;
+ // Allocate right-sized byte array.
+ byte [] bytes = new byte[KEYVALUE_INFRASTRUCTURE_SIZE + keylength + vlength];
+ // Write key, value and key row length.
+ int pos = 0;
+ pos = Bytes.putInt(bytes, pos, keylength);
+ pos = Bytes.putInt(bytes, pos, vlength);
+ pos = Bytes.putShort(bytes, pos, (short)(rlength & 0x0000ffff));
+ pos = Bytes.putBytes(bytes, pos, row, roffset, rlength);
+ // Write out column family length.
+ pos = Bytes.putByte(bytes, pos, (byte)(delimiteroffset & 0x0000ff));
+ if (column != null && column.length != 0) {
+ // Write family.
+ pos = Bytes.putBytes(bytes, pos, column, coffset, delimiteroffset);
+ // Write qualifier.
+ delimiteroffset++;
+ pos = Bytes.putBytes(bytes, pos, column, coffset + delimiteroffset,
+ column.length - delimiteroffset);
+ }
+ pos = Bytes.putLong(bytes, pos, timestamp);
+ pos = Bytes.putByte(bytes, pos, type.getCode());
+ if (value != null && value.length > 0) {
+ pos = Bytes.putBytes(bytes, pos, value, voffset, vlength);
+ }
+ return bytes;
+ }
+
+ // Needed for doing 'contains' on a List.
+ public boolean equals(Object other) {
+ KeyValue kv = (KeyValue)other;
+ // Comparing bytes should be fine doing equals test. Shouldn't have to
+ // worry about special .META. comparators doing straight equals.
+ boolean result = Bytes.BYTES_RAWCOMPARATOR.compare(getBuffer(),
+ getKeyOffset(), getKeyLength(),
+ kv.getBuffer(), kv.getKeyOffset(), kv.getKeyLength()) == 0;
+ return result;
+ }
+
+ /**
+ * @param timestamp
+ * @return Clone of this key portion with only the row and timestamp filled in.
+ */
+ public KeyValue cloneRow(final long timestamp) {
+ return new KeyValue(getBuffer(), getRowOffset(), getRowLength(),
+ null, 0, 0, timestamp, Type.codeToType(getType()), null, 0, 0);
+ }
+
+ /**
+ * @return Clone of this key portion with type set to Type.Delete.
+ */
+ public KeyValue cloneDelete() {
+ return createKey(Type.Delete);
+ }
+
+ /**
+ * @return Clone of this key portion with type set to Type.Maximum. Use this
+ * when doing getClosest lookups. Using Maximum, you'll be
+ * sure to trip over all of the other key types since Maximum sorts first.
+ */
+ public KeyValue cloneMaximum() {
+ return createKey(Type.Maximum);
+ }
+
+ /*
+ * Make a clone with the new type.
+ * Does not copy value.
+ * @param newtype New type to set on clone of this key.
+ * @return Clone of this key with type set to <code>newtype</code>
+ */
+ private KeyValue createKey(final Type newtype) {
+ int keylength = getKeyLength();
+ int l = keylength + ROW_OFFSET;
+ byte [] other = new byte[l];
+ System.arraycopy(getBuffer(), getOffset(), other, 0, l);
+ // Set value length to zero.
+ Bytes.putInt(other, Bytes.SIZEOF_INT, 0);
+ // Set last byte, the type, to new type
+ other[l - 1] = newtype.getCode();
+ return new KeyValue(other, 0, other.length);
+ }
+
+ public String toString() {
+ return keyToString(this.bytes, this.offset + ROW_OFFSET, getKeyLength()) +
+ "/vlen=" + getValueLength();
+ }
+
+ /**
+ * @param k Key portion of a KeyValue.
+ * @return Key as a String.
+ */
+ public static String keyToString(final byte [] k) {
+ return keyToString(k, 0, k.length);
+ }
+
+ /**
+ * @param b Key portion of a KeyValue.
+ * @param o Offset to start of key
+ * @param l Length of key.
+ * @return Key as a String.
+ */
+ public static String keyToString(final byte [] b, final int o, final int l) {
+ int rowlength = Bytes.toShort(b, o);
+ String row = Bytes.toString(b, o + Bytes.SIZEOF_SHORT, rowlength);
+ int columnoffset = o + Bytes.SIZEOF_SHORT + 1 + rowlength;
+ int familylength = b[columnoffset - 1];
+ int columnlength = l - ((columnoffset - o) + TIMESTAMP_TYPE_SIZE);
+ String family = familylength == 0? "":
+ Bytes.toString(b, columnoffset, familylength);
+ String qualifier = columnlength == 0? "":
+ Bytes.toString(b, columnoffset + familylength,
+ columnlength - familylength);
+ long timestamp = Bytes.toLong(b, o + (l - TIMESTAMP_TYPE_SIZE));
+ byte type = b[o + l - 1];
+ return row + "/" + family +
+ (family != null && family.length() > 0? COLUMN_FAMILY_DELIMITER: "") +
+ qualifier + "/" + timestamp + "/" + Type.codeToType(type);
+ }
+
+ /**
+ * @return The byte array backing this KeyValue.
+ */
+ public byte [] getBuffer() {
+ return this.bytes;
+ }
+
+ /**
+ * @return Offset into {@link #getBuffer()} at which this KeyValue starts.
+ */
+ public int getOffset() {
+ return this.offset;
+ }
+
+ /**
+ * @return Length of bytes this KeyValue occupies in {@link #getBuffer()}.
+ */
+ public int getLength() {
+ return length;
+ }
+
+ /*
+ * Determines the total length of the KeyValue stored in the specified
+ * byte array and offset. Includes all headers.
+ * @param bytes byte array
+ * @param offset offset to start of the KeyValue
+ * @return length of entire KeyValue, in bytes
+ */
+ private static int getLength(byte [] bytes, int offset) {
+ return (2 * Bytes.SIZEOF_INT) +
+ Bytes.toInt(bytes, offset) +
+ Bytes.toInt(bytes, offset + Bytes.SIZEOF_INT);
+ }
+
+ /**
+ * @return Copy of the key portion only. Used when compacting and in tests.
+ */
+ public byte [] getKey() {
+ int keylength = getKeyLength();
+ byte [] key = new byte[keylength];
+ System.arraycopy(getBuffer(), getKeyOffset(), key, 0, keylength);
+ return key;
+ }
+
+ /**
+ * @return Key offset in backing buffer.
+ */
+ public int getKeyOffset() {
+ return this.offset + ROW_OFFSET;
+ }
+
+ /**
+ * @return Row length.
+ */
+ public short getRowLength() {
+ return Bytes.toShort(this.bytes, getKeyOffset());
+ }
+
+ /**
+ * @return Offset into backing buffer at which row starts.
+ */
+ public int getRowOffset() {
+ return getKeyOffset() + Bytes.SIZEOF_SHORT;
+ }
+
+ /**
+ * Do not use this unless you have to.
+ * Use {@link #getBuffer()} with appropriate offsets and lengths instead.
+ * @return Row in a new byte array.
+ */
+ public byte [] getRow() {
+ int o = getRowOffset();
+ short l = getRowLength();
+ byte [] result = new byte[l];
+ System.arraycopy(getBuffer(), o, result, 0, l);
+ return result;
+ }
+
+ /**
+ * @return Timestamp
+ */
+ public long getTimestamp() {
+ return getTimestamp(getKeyLength());
+ }
+
+ /**
+ * @param keylength Pass if you have it to save on an int creation.
+ * @return Timestamp
+ */
+ long getTimestamp(final int keylength) {
+ int tsOffset = getTimestampOffset(keylength);
+ return Bytes.toLong(this.bytes, tsOffset);
+ }
+
+ /**
+ * @param keylength Pass if you have it to save on an int creation.
+ * @return Offset into backing buffer at which timestamp starts.
+ */
+ int getTimestampOffset(final int keylength) {
+ return getKeyOffset() + keylength - TIMESTAMP_TYPE_SIZE;
+ }
+
+ /**
+ * @return True if a {@link Type#Delete}.
+ */
+ public boolean isDeleteType() {
+ return getType() == Type.Delete.getCode();
+ }
+
+ /**
+ * @return Type of this KeyValue.
+ */
+ byte getType() {
+ return getType(getKeyLength());
+ }
+
+ /**
+ * @param keylength Pass if you have it to save on an int creation.
+ * @return Type of this KeyValue.
+ */
+ byte getType(final int keylength) {
+ return this.bytes[this.offset + keylength - 1 + ROW_OFFSET];
+ }
+
+ /**
+ * @return Length of key portion.
+ */
+ public int getKeyLength() {
+ return Bytes.toInt(this.bytes, this.offset);
+ }
+
+ /**
+ * @return Value length
+ */
+ public int getValueLength() {
+ return Bytes.toInt(this.bytes, this.offset + Bytes.SIZEOF_INT);
+ }
+
+ /**
+ * @return Offset into backing buffer at which value starts.
+ */
+ public int getValueOffset() {
+ return getKeyOffset() + getKeyLength();
+ }
+
+ /**
+ * Do not use unless you have to. Use {@link #getBuffer()} with appropriate
+ * offset and lengths instead.
+ * @return Value in a new byte array.
+ */
+ public byte [] getValue() {
+ int o = getValueOffset();
+ int l = getValueLength();
+ byte [] result = new byte[l];
+ System.arraycopy(getBuffer(), o, result, 0, l);
+ return result;
+ }
+
+ /**
+ * @return Offset into backing buffer at which the column begins
+ */
+ public int getColumnOffset() {
+ return getColumnOffset(getRowLength());
+ }
+
+ /**
+ * @param rowlength Pass if you have it to save on an int creation.
+ * @return Offset into backing buffer at which the column begins
+ */
+ public int getColumnOffset(final int rowlength) {
+ return getRowOffset() + rowlength + 1;
+ }
+
+ /**
+ * @param columnoffset Pass if you have it to save on an int creation.
+ * @return Length of family portion of column.
+ */
+ int getFamilyLength(final int columnoffset) {
+ return this.bytes[columnoffset - 1];
+ }
+
+ /**
+ * @param columnoffset Pass if you have it to save on an int creation.
+ * @return Length of column.
+ */
+ public int getColumnLength(final int columnoffset) {
+ return getColumnLength(columnoffset, getKeyLength());
+ }
+
+ int getColumnLength(final int columnoffset, final int keylength) {
+ return (keylength + ROW_OFFSET) - (columnoffset - this.offset) -
+ TIMESTAMP_TYPE_SIZE;
+ }
+
+ /**
+ * @param family
+ * @return True if matching families.
+ */
+ public boolean matchingFamily(final byte [] family) {
+ int o = getColumnOffset();
+ // Family length byte is just before the column starts.
+ int l = this.bytes[o - 1];
+ return Bytes.compareTo(family, 0, family.length, this.bytes, o, l) == 0;
+ }
+
+ /**
+ * @param column Column minus its delimiter
+ * @return True if column matches.
+ * @see #matchingColumn(byte[])
+ */
+ public boolean matchingColumnNoDelimiter(final byte [] column) {
+ int o = getColumnOffset();
+ int l = getColumnLength(o);
+ return compareColumns(getBuffer(), o, l, column, 0, column.length) == 0;
+ }
+
+ /**
+ * @param column Column with delimiter
+ * @return True if column matches.
+ */
+ public boolean matchingColumn(final byte [] column) {
+ int index = getFamilyDelimiterIndex(column, 0, column.length);
+ int o = getColumnOffset();
+ int l = getColumnLength(o);
+ int result = Bytes.compareTo(getBuffer(), o, index, column, 0, index);
+ if (result != 0) {
+ return false;
+ }
+ return Bytes.compareTo(getBuffer(), o + index, l - index,
+ column, index + 1, column.length - (index + 1)) == 0;
+ }
+
+ /**
+ * @param left
+ * @param loffset
+ * @param llength
+ * @param right
+ * @param roffset
+ * @param rlength
+ * @return 0 if the columns are equal, negative if left sorts first, positive otherwise.
+ */
+ static int compareColumns(final byte [] left, final int loffset,
+ final int llength, final byte [] right, final int roffset,
+ final int rlength) {
+ return Bytes.compareTo(left, loffset, llength, right, roffset, rlength);
+ }
+
+ /**
+ * @return True if non-null row and column.
+ */
+ public boolean nonNullRowAndColumn() {
+ return getRowLength() > 0 && !isEmptyColumn();
+ }
+
+ /**
+ * @return Column String with delimiter added back. Expensive!
+ */
+ public String getColumnString() {
+ int o = getColumnOffset();
+ int l = getColumnLength(o);
+ int familylength = getFamilyLength(o);
+ return Bytes.toString(this.bytes, o, familylength) +
+ COLUMN_FAMILY_DELIMITER + Bytes.toString(this.bytes,
+ o + familylength, l - familylength);
+ }
+
+ /**
+ * Do not use this unless you have to.
+ * Use {@link #getBuffer()} with appropriate offsets and lengths instead.
+ * @return Column in a new byte array, with the delimiter inserted.
+ */
+ public byte [] getColumn() {
+ int o = getColumnOffset();
+ int l = getColumnLength(o);
+ int familylength = getFamilyLength(o);
+ byte [] result = new byte[l + 1];
+ System.arraycopy(getBuffer(), o, result, 0, familylength);
+ result[familylength] = COLUMN_FAMILY_DELIMITER;
+ System.arraycopy(getBuffer(), o + familylength, result,
+ familylength + 1, l - familylength);
+ return result;
+ }
+
+ /**
+ * @return True if column is empty.
+ */
+ public boolean isEmptyColumn() {
+ return getColumnLength(getColumnOffset()) == 0;
+ }
+
+ /**
+ * @param b
+ * @param offset
+ * @param length
+ * @return Index of the family-qualifier colon delimiter character in passed
+ * buffer.
+ */
+ public static int getFamilyDelimiterIndex(final byte [] b, final int offset,
+ final int length) {
+ return getRequiredDelimiter(b, offset, length, COLUMN_FAMILY_DELIMITER);
+ }
+
+ private static int getRequiredDelimiter(final byte [] b,
+ final int offset, final int length, final int delimiter) {
+ int index = getDelimiter(b, offset, length, delimiter);
+ if (index < 0) {
+ throw new IllegalArgumentException("No " + (char)delimiter + " in <" +
+ Bytes.toString(b) + ">" + ", length=" + length + ", offset=" + offset);
+ }
+ return index;
+ }
+
+ static int getRequiredDelimiterInReverse(final byte [] b,
+ final int offset, final int length, final int delimiter) {
+ int index = getDelimiterInReverse(b, offset, length, delimiter);
+ if (index < 0) {
+ throw new IllegalArgumentException("No " + delimiter + " in <" +
+ Bytes.toString(b) + ">" + ", length=" + length + ", offset=" + offset);
+ }
+ return index;
+ }
+
+ /*
+ * @param b
+ * @param delimiter
+ * @return Index of first occurrence of <code>delimiter</code> scanning
+ * forward from <code>offset</code>, or -1 if not found.
+ */
+ static int getDelimiter(final byte [] b, int offset, final int length,
+ final int delimiter) {
+ if (b == null) {
+ throw new NullPointerException();
+ }
+ int result = -1;
+ for (int i = offset; i < length + offset; i++) {
+ if (b[i] == delimiter) {
+ result = i;
+ break;
+ }
+ }
+ return result;
+ }
+
+ /*
+ * @param b
+ * @param delimiter
+ * @return Index of delimiter having started from end of <code>b</code> moving
+ * leftward, or -1 if not found.
+ */
+ static int getDelimiterInReverse(final byte [] b, final int offset,
+ final int length, final int delimiter) {
+ if (b == null) {
+ throw new NullPointerException();
+ }
+ int result = -1;
+ for (int i = (offset + length) - 1; i >= offset; i--) {
+ if (b[i] == delimiter) {
+ result = i;
+ break;
+ }
+ }
+ return result;
+ }
+
+ /**
+ * A {@link KVComparator} for <code>-ROOT-</code> catalog table
+ * {@link KeyValue}s.
+ */
+ public static class RootComparator extends MetaComparator {
+ private final KeyComparator rawcomparator = new RootKeyComparator();
+
+ public KeyComparator getRawComparator() {
+ return this.rawcomparator;
+ }
+ }
+
+ /**
+ * A {@link KVComparator} for <code>.META.</code> catalog table
+ * {@link KeyValue}s.
+ */
+ public static class MetaComparator extends KVComparator {
+ private final KeyComparator rawcomparator = new MetaKeyComparator();
+
+ public KeyComparator getRawComparator() {
+ return this.rawcomparator;
+ }
+ }
+
+ /**
+ * Compare KeyValues.
+ * Hosts a {@link KeyComparator}.
+ */
+ public static class KVComparator implements java.util.Comparator<KeyValue> {
+ private final KeyComparator rawcomparator = new KeyComparator();
+
+ /**
+ * @return RawComparator that can compare the Key portion of a KeyValue.
+ * Used in hfile where indices are the Key portion of a KeyValue.
+ */
+ public KeyComparator getRawComparator() {
+ return this.rawcomparator;
+ }
+
+ public int compare(final KeyValue left, final KeyValue right) {
+ return getRawComparator().compare(left.getBuffer(),
+ left.getOffset() + ROW_OFFSET, left.getKeyLength(),
+ right.getBuffer(), right.getOffset() + ROW_OFFSET,
+ right.getKeyLength());
+ }
+
+ public int compareTimestamps(final KeyValue left, final KeyValue right) {
+ return compareTimestamps(left, left.getKeyLength(), right,
+ right.getKeyLength());
+ }
+
+ int compareTimestamps(final KeyValue left, final int lkeylength,
+ final KeyValue right, final int rkeylength) {
+ // Compare timestamps
+ long ltimestamp = left.getTimestamp(lkeylength);
+ long rtimestamp = right.getTimestamp(rkeylength);
+ return getRawComparator().compareTimestamps(ltimestamp, rtimestamp);
+ }
+
+ /**
+ * @param left
+ * @param right
+ * @return Result comparing rows.
+ */
+ public int compareRows(final KeyValue left, final KeyValue right) {
+ return compareRows(left, left.getRowLength(), right, right.getRowLength());
+ }
+
+ /**
+ * @param left
+ * @param lrowlength Length of left row.
+ * @param right
+ * @param rrowlength Length of right row.
+ * @return Result comparing rows.
+ */
+ public int compareRows(final KeyValue left, final short lrowlength,
+ final KeyValue right, final short rrowlength) {
+ return getRawComparator().compareRows(left.getBuffer(),
+ left.getRowOffset(), lrowlength,
+ right.getBuffer(), right.getRowOffset(), rrowlength);
+ }
+
+ /**
+ * @param left
+ * @param row
+ * @return Result comparing <code>left</code>'s row to the passed <code>row</code>.
+ */
+ public int compareRows(final KeyValue left, final byte [] row) {
+ return getRawComparator().compareRows(left.getBuffer(),
+ left.getRowOffset(), left.getRowLength(), row, 0, row.length);
+ }
+
+ public int compareRows(byte [] left, int loffset, int llength,
+ byte [] right, int roffset, int rlength) {
+ return getRawComparator().compareRows(left, loffset, llength,
+ right, roffset, rlength);
+ }
+
+ public int compareColumns(final KeyValue left, final byte [] right,
+ final int roffset, final int rlength) {
+ int offset = left.getColumnOffset();
+ int length = left.getColumnLength(offset);
+ return getRawComparator().compareColumns(left.getBuffer(), offset, length,
+ right, roffset, rlength);
+ }
+
+ int compareColumns(final KeyValue left, final short lrowlength,
+ final int lkeylength, final KeyValue right, final short rrowlength,
+ final int rkeylength) {
+ int loffset = left.getColumnOffset(lrowlength);
+ int roffset = right.getColumnOffset(rrowlength);
+ int llength = left.getColumnLength(loffset, lkeylength);
+ int rlength = right.getColumnLength(roffset, rkeylength);
+ return getRawComparator().compareColumns(left.getBuffer(), loffset,
+ llength,
+ right.getBuffer(), roffset, rlength);
+ }
+
+ /**
+ * Compares the row and column of two KeyValues.
+ * @param left
+ * @param right
+ * @return True if same row and column.
+ */
+ public boolean matchingRowColumn(final KeyValue left,
+ final KeyValue right) {
+ short lrowlength = left.getRowLength();
+ short rrowlength = right.getRowLength();
+ if (!matchingRows(left, lrowlength, right, rrowlength)) {
+ return false;
+ }
+ int lkeylength = left.getKeyLength();
+ int rkeylength = right.getKeyLength();
+ return compareColumns(left, lrowlength, lkeylength,
+ right, rrowlength, rkeylength) == 0;
+ }
+
+ /**
+ * @param left
+ * @param right
+ * @return True if rows match.
+ */
+ public boolean matchingRows(final KeyValue left, final byte [] right) {
+ return compareRows(left, right) == 0;
+ }
+
+ /**
+ * @param left
+ * @param right
+ * @return True if rows match.
+ */
+ public boolean matchingRows(final KeyValue left, final KeyValue right) {
+ short lrowlength = left.getRowLength();
+ short rrowlength = right.getRowLength();
+ return matchingRows(left, lrowlength, right, rrowlength);
+ }
+
+ /**
+ * @param left
+ * @param lrowlength
+ * @param right
+ * @param rrowlength
+ * @return True if rows match.
+ */
+ public boolean matchingRows(final KeyValue left, final short lrowlength,
+ final KeyValue right, final short rrowlength) {
+ int compare = compareRows(left, lrowlength, right, rrowlength);
+ if (compare != 0) {
+ return false;
+ }
+ return true;
+ }
+
+ public boolean matchingRows(final byte [] left, final int loffset,
+ final int llength,
+ final byte [] right, final int roffset, final int rlength) {
+ int compare = compareRows(left, loffset, llength, right, roffset, rlength);
+ if (compare != 0) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Compares the row and timestamp of two keys.
+ * Was called matchesWithoutColumn in HStoreKey.
+ * @param left
+ * @param right Key to compare against.
+ * @return True if same row and <code>left</code>'s timestamp is greater than
+ * or equal to the timestamp in <code>right</code>.
+ */
+ public boolean matchingRowsGreaterTimestamp(final KeyValue left,
+ final KeyValue right) {
+ short lrowlength = left.getRowLength();
+ short rrowlength = right.getRowLength();
+ if (!matchingRows(left, lrowlength, right, rrowlength)) {
+ return false;
+ }
+ return left.getTimestamp() >= right.getTimestamp();
+ }
+
+ @Override
+ protected Object clone() throws CloneNotSupportedException {
+ return new KVComparator();
+ }
+
+ /**
+ * @return Comparator that ignores timestamps; useful when counting versions.
+ */
+ public KVComparator getComparatorIgnoringTimestamps() {
+ KVComparator c = null;
+ try {
+ c = (KVComparator)this.clone();
+ c.getRawComparator().ignoreTimestamp = true;
+ } catch (CloneNotSupportedException e) {
+ LOG.error("Not supported", e);
+ }
+ return c;
+ }
+
+ /**
+ * @return Comparator that ignores key type; useful when checking deletes.
+ */
+ public KVComparator getComparatorIgnoringType() {
+ KVComparator c = null;
+ try {
+ c = (KVComparator)this.clone();
+ c.getRawComparator().ignoreType = true;
+ } catch (CloneNotSupportedException e) {
+ LOG.error("Not supported", e);
+ }
+ return c;
+ }
+ }
+
+ /**
+ * @param row
+ * @return First possible KeyValue on passed <code>row</code>
+ */
+ public static KeyValue createFirstOnRow(final byte [] row) {
+ return createFirstOnRow(row, HConstants.LATEST_TIMESTAMP);
+ }
+
+ /**
+ * @param row
+ * @param ts
+ * @return First possible key on passed <code>row</code> and timestamp.
+ */
+ public static KeyValue createFirstOnRow(final byte [] row,
+ final long ts) {
+ return createFirstOnRow(row, null, ts);
+ }
+
+ /**
+ * @param row
+ * @param c Column with delimiter between family and qualifier
+ * @param ts
+ * @return First possible key on passed <code>row</code>, column and timestamp.
+ */
+ public static KeyValue createFirstOnRow(final byte [] row, final byte [] c,
+ final long ts) {
+ return new KeyValue(row, c, ts, Type.Maximum);
+ }
+
+ /**
+ * @param b
+ * @param o
+ * @param l
+ * @return A KeyValue made of a byte array that holds the key-only part.
+ * Needed to convert hfile index members to KeyValues.
+ */
+ public static KeyValue createKeyValueFromKey(final byte [] b, final int o,
+ final int l) {
+ byte [] newb = new byte[l + ROW_OFFSET];
+ System.arraycopy(b, o, newb, ROW_OFFSET, l);
+ Bytes.putInt(newb, 0, l);
+ Bytes.putInt(newb, Bytes.SIZEOF_INT, 0);
+ return new KeyValue(newb);
+ }
+
+ /**
+ * Compare key portion of a {@link KeyValue} for keys in <code>-ROOT-</code>
+ * table.
+ */
+ public static class RootKeyComparator extends MetaKeyComparator {
+ public int compareRows(byte [] left, int loffset, int llength,
+ byte [] right, int roffset, int rlength) {
+ // Rows look like this: .META.,ROW_FROM_META,RID
+ // LOG.info("ROOT " + Bytes.toString(left, loffset, llength) +
+ // "---" + Bytes.toString(right, roffset, rlength));
+ final int metalength = 7; // '.META.,' length, including the trailing delimiter
+ int lmetaOffsetPlusDelimiter = loffset + metalength;
+ int leftFarDelimiter = getDelimiterInReverse(left, lmetaOffsetPlusDelimiter,
+ llength - metalength, HRegionInfo.DELIMITER);
+ int rmetaOffsetPlusDelimiter = roffset + metalength;
+ int rightFarDelimiter = getDelimiterInReverse(right,
+ rmetaOffsetPlusDelimiter, rlength - metalength,
+ HRegionInfo.DELIMITER);
+ if (leftFarDelimiter < 0 && rightFarDelimiter >= 0) {
+ // Nothing between .META. and the regionid. It's the first key.
+ return -1;
+ } else if (rightFarDelimiter < 0 && leftFarDelimiter >= 0) {
+ return 1;
+ } else if (leftFarDelimiter < 0 && rightFarDelimiter < 0) {
+ return 0;
+ }
+ int result = super.compareRows(left, lmetaOffsetPlusDelimiter,
+ leftFarDelimiter - lmetaOffsetPlusDelimiter,
+ right, rmetaOffsetPlusDelimiter,
+ rightFarDelimiter - rmetaOffsetPlusDelimiter);
+ if (result != 0) {
+ return result;
+ }
+ // Compare last part of row, the rowid.
+ leftFarDelimiter++;
+ rightFarDelimiter++;
+ result = compareRowid(left, leftFarDelimiter,
+ llength - (leftFarDelimiter - loffset),
+ right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset));
+ return result;
+ }
+ }
+
+ /**
+ * Compare key portion of a {@link KeyValue} for keys in <code>.META.</code>
+ * table.
+ */
+ public static class MetaKeyComparator extends KeyComparator {
+ public int compareRows(byte [] left, int loffset, int llength,
+ byte [] right, int roffset, int rlength) {
+ // LOG.info("META " + Bytes.toString(left, loffset, llength) +
+ // "---" + Bytes.toString(right, roffset, rlength));
+ int leftDelimiter = getDelimiter(left, loffset, llength,
+ HRegionInfo.DELIMITER);
+ int rightDelimiter = getDelimiter(right, roffset, rlength,
+ HRegionInfo.DELIMITER);
+ if (leftDelimiter < 0 && rightDelimiter >= 0) {
+ // Nothing between the table name and the regionid. It's the first key.
+ return -1;
+ } else if (rightDelimiter < 0 && leftDelimiter >= 0) {
+ return 1;
+ } else if (leftDelimiter < 0 && rightDelimiter < 0) {
+ return 0;
+ }
+ // Compare up to the delimiter
+ int result = Bytes.compareTo(left, loffset, leftDelimiter - loffset,
+ right, roffset, rightDelimiter - roffset);
+ if (result != 0) {
+ return result;
+ }
+ // Compare middle bit of the row.
+ // Move past delimiter
+ leftDelimiter++;
+ rightDelimiter++;
+ int leftFarDelimiter = getRequiredDelimiterInReverse(left, leftDelimiter,
+ llength - (leftDelimiter - loffset), HRegionInfo.DELIMITER);
+ int rightFarDelimiter = getRequiredDelimiterInReverse(right,
+ rightDelimiter, rlength - (rightDelimiter - roffset),
+ HRegionInfo.DELIMITER);
+ // Now compare middle section of row.
+ result = super.compareRows(left, leftDelimiter,
+ leftFarDelimiter - leftDelimiter, right, rightDelimiter,
+ rightFarDelimiter - rightDelimiter);
+ if (result != 0) {
+ return result;
+ }
+ // Compare last part of row, the rowid.
+ leftFarDelimiter++;
+ rightFarDelimiter++;
+ result = compareRowid(left, leftFarDelimiter,
+ llength - (leftFarDelimiter - loffset),
+ right, rightFarDelimiter, rlength - (rightFarDelimiter - roffset));
+ return result;
+ }
+
+ protected int compareRowid(byte[] left, int loffset, int llength,
+ byte[] right, int roffset, int rlength) {
+ return Bytes.compareTo(left, loffset, llength, right, roffset, rlength);
+ }
+ }
+
+ /**
+ * Compare key portion of a {@link KeyValue}
+ */
+ public static class KeyComparator implements RawComparator<byte []> {
+ volatile boolean ignoreTimestamp = false;
+ volatile boolean ignoreType = false;
+
+ public int compare(byte[] left, int loffset, int llength, byte[] right,
+ int roffset, int rlength) {
+ // Compare row
+ short lrowlength = Bytes.toShort(left, loffset);
+ short rrowlength = Bytes.toShort(right, roffset);
+ int compare = compareRows(left, loffset + Bytes.SIZEOF_SHORT,
+ lrowlength,
+ right, roffset + Bytes.SIZEOF_SHORT, rrowlength);
+ if (compare != 0) {
+ return compare;
+ }
+
+ // Compare column family. Start compare past row and family length.
+ int lcolumnoffset = Bytes.SIZEOF_SHORT + lrowlength + 1 + loffset;
+ int rcolumnoffset = Bytes.SIZEOF_SHORT + rrowlength + 1 + roffset;
+ int lcolumnlength = llength - TIMESTAMP_TYPE_SIZE -
+ (lcolumnoffset - loffset);
+ int rcolumnlength = rlength - TIMESTAMP_TYPE_SIZE -
+ (rcolumnoffset - roffset);
+ compare = Bytes.compareTo(left, lcolumnoffset, lcolumnlength, right,
+ rcolumnoffset, rcolumnlength);
+ if (compare != 0) {
+ return compare;
+ }
+
+ if (!this.ignoreTimestamp) {
+ // Get timestamps.
+ long ltimestamp = Bytes.toLong(left,
+ loffset + (llength - TIMESTAMP_TYPE_SIZE));
+ long rtimestamp = Bytes.toLong(right,
+ roffset + (rlength - TIMESTAMP_TYPE_SIZE));
+ compare = compareTimestamps(ltimestamp, rtimestamp);
+ if (compare != 0) {
+ return compare;
+ }
+ }
+
+ if (!this.ignoreType) {
+ // Compare types. Let the delete types sort ahead of puts; i.e. types
+ // of higher numbers sort before those of lesser numbers
+ byte ltype = left[loffset + (llength - 1)];
+ byte rtype = right[roffset + (rlength - 1)];
+ return (0xff & rtype) - (0xff & ltype);
+ }
+ return 0;
+ }
+
+ public int compare(byte[] left, byte[] right) {
+ return compare(left, 0, left.length, right, 0, right.length);
+ }
+
+ protected int compareRows(byte [] left, int loffset, int llength,
+ byte [] right, int roffset, int rlength) {
+ return Bytes.compareTo(left, loffset, llength, right, roffset, rlength);
+ }
+
+ protected int compareColumns(byte [] left, int loffset, int llength,
+ byte [] right, int roffset, int rlength) {
+ return KeyValue.compareColumns(left, loffset, llength, right, roffset, rlength);
+ }
+
+ int compareTimestamps(final long ltimestamp, final long rtimestamp) {
+ // Sorting newer timestamps ahead of older ones looks backwards but is
+ // intentional. This way, newer timestamps are found first when we
+ // iterate over a memcache, and newer versions are the first we trip
+ // over when reading from a store file.
+ if (ltimestamp < rtimestamp) {
+ return 1;
+ } else if (ltimestamp > rtimestamp) {
+ return -1;
+ }
+ return 0;
+ }
+ }
+}
\ No newline at end of file
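
A note on the layout the new KeyComparator walks: the offsets above imply a key of 2-byte row length, row bytes, 1-byte family length, column bytes, then TIMESTAMP_TYPE_SIZE trailing bytes (8-byte timestamp plus 1-byte type), compared in that order with timestamp and type descending. Below is a minimal standalone sketch of the compareTimestamps contract (plain Java, no HBase types; class and variable names are illustrative only, not part of the patch):

    import java.util.Arrays;
    import java.util.Comparator;

    public class TimestampOrderDemo {
      // Standalone analogue of KeyComparator.compareTimestamps: larger
      // (newer) timestamps compare as "smaller" so they sort first.
      static int compareTimestamps(long l, long r) {
        if (l < r) return 1;
        if (l > r) return -1;
        return 0;
      }

      public static void main(String[] args) {
        Long[] stamps = { 1000L, 3000L, 2000L };
        Arrays.sort(stamps, new Comparator<Long>() {
          public int compare(Long a, Long b) {
            return compareTimestamps(a.longValue(), b.longValue());
          }
        });
        // Prints [3000, 2000, 1000]: newest first, so a scan trips over
        // the freshest version of a cell before any older ones.
        System.out.println(Arrays.toString(stamps));
      }
    }
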
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java Sun Apr 12 10:39:55 2009
@@ -548,12 +548,12 @@
}
try {
- // locate the root region
+ // locate the root or meta region
HRegionLocation metaLocation = locateRegion(parentTable, metaKey);
HRegionInterface server =
getHRegionConnection(metaLocation.getServerAddress());
- // Query the root region for the location of the meta region
+ // Query the root or meta region for the location of the requested region
RowResult regionInfoRow = server.getClosestRowBefore(
metaLocation.getRegionInfo().getRegionName(), metaKey,
HConstants.COLUMN_FAMILY);
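
For context on the hunk above: getClosestRowBefore returns the catalog row whose key is the greatest one at or below the probe key, which is how a client finds the region containing an arbitrary row whether the parent table is root or meta. A standalone analogue using TreeMap.floorEntry (plain lexicographic order, ignoring the section-wise meta comparator; keys and values here are hypothetical):

    import java.util.TreeMap;

    public class ClosestRowBeforeDemo {
      public static void main(String[] args) {
        TreeMap<String, String> catalog = new TreeMap<String, String>();
        // Hypothetical catalog rows keyed <tablename>,<startrow>,<regionid>.
        catalog.put("t1,,1236021087996", "regioninfo-A");    // startrow ""
        catalog.put("t1,mmm,1236021088012", "regioninfo-B"); // startrow "mmm"
        // floorEntry is the TreeMap analogue of getClosestRowBefore:
        // greatest key less than or equal to the probe.
        String probe = "t1,abc,99999999999999";
        System.out.println(catalog.floorEntry(probe).getValue()); // regioninfo-A
      }
    }
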
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/MetaScanner.java Sun Apr 12 10:39:55 2009
@@ -56,7 +56,7 @@
try {
RowResult r = null;
do {
- RowResult[] rrs = connection.getRegionServerWithRetries(callable);
+ RowResult [] rrs = connection.getRegionServerWithRetries(callable);
if (rrs == null || rrs.length == 0 || rrs[0].size() == 0) {
break;
}
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/ColumnValueFilter.java Sun Apr 12 10:39:55 2009
@@ -23,9 +23,11 @@
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
+import java.util.List;
import java.util.SortedMap;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.io.ObjectWritable;
@@ -123,6 +125,10 @@
}
public boolean filterRowKey(final byte[] rowKey) {
+ return filterRowKey(rowKey, 0, rowKey.length);
+ }
+
+ public boolean filterRowKey(byte[] rowKey, int offset, int length) {
return false;
}
@@ -135,7 +141,14 @@
return false;
}
return filterColumnValue(data);
-
+ }
+
+
+ public boolean filterColumn(byte[] rowKey, int roffset, int rlength,
+ byte[] columnName, int coffset, int clength, byte[] columnValue,
+ int voffset, int vlength) {
+ if (true) throw new RuntimeException("Not yet implemented");
+ return false;
}
private boolean filterColumnValue(final byte [] data) {
@@ -182,6 +195,12 @@
return this.filterColumnValue(colCell.getValue());
}
+
+ public boolean filterRow(List<KeyValue> results) {
+ if (true) throw new RuntimeException("Not yet implemented");
+ return false;
+ }
+
private int compare(final byte[] b1, final byte[] b2) {
int len = Math.min(b1.length, b2.length);
@@ -206,6 +225,11 @@
// Nothing
}
+
+ public void rowProcessed(boolean filtered, byte[] key, int offset, int length) {
+ // Nothing
+ }
+
public void validate(final byte[][] columns) {
// Nothing
}
@@ -236,5 +260,4 @@
WritableByteArrayComparable.class, new HBaseConfiguration());
out.writeBoolean(filterIfColumnMissing);
}
-
-}
+}
\ No newline at end of file
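
The shape repeated across these filter changes: each old whole-array signature becomes a thin delegate onto a new (array, offset, length) overload, so KeyValue-backed slices can be filtered without copying. A minimal sketch of the idiom (hypothetical class, not part of the patch):

    public class OffsetOverloadDemo {
      /** @deprecated Use {@link #filterRowKey(byte[], int, int)} instead. */
      @Deprecated
      public boolean filterRowKey(byte[] rowKey) {
        // The old whole-array signature forwards to the slice form, so
        // behavior is unchanged for existing callers.
        return filterRowKey(rowKey, 0, rowKey.length);
      }

      public boolean filterRowKey(byte[] rowKey, int offset, int length) {
        // A real filter examines only bytes in [offset, offset + length).
        return length == 0;
      }

      public static void main(String[] args) {
        System.out.println(new OffsetOverloadDemo().filterRowKey(new byte[0])); // true
      }
    }
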
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PageRowFilter.java Sun Apr 12 10:39:55 2009
@@ -22,8 +22,10 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.util.List;
import java.util.SortedMap;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.Cell;
/**
@@ -71,6 +73,10 @@
public void rowProcessed(boolean filtered,
byte [] rowKey) {
+ rowProcessed(filtered, rowKey, 0, rowKey.length);
+ }
+
+ public void rowProcessed(boolean filtered, byte[] key, int offset, int length) {
if (!filtered) {
this.rowsAccepted++;
}
@@ -85,12 +91,24 @@
}
public boolean filterRowKey(final byte [] r) {
+ return filterRowKey(r, 0, r.length);
+ }
+
+
+ public boolean filterRowKey(byte[] rowKey, int offset, int length) {
return filterAllRemaining();
}
public boolean filterColumn(final byte [] rowKey,
final byte [] colKey,
final byte[] data) {
+ return filterColumn(rowKey, 0, rowKey.length, colKey, 0, colKey.length,
+ data, 0, data.length);
+ }
+
+ public boolean filterColumn(byte[] rowKey, int roffset, int rlength,
+ byte[] columnName, int coffset, int clength, byte[] columnValue,
+ int voffset, int vlength) {
return filterAllRemaining();
}
@@ -98,6 +116,10 @@
return filterAllRemaining();
}
+ public boolean filterRow(List<KeyValue> results) {
+ return filterAllRemaining();
+ }
+
public void readFields(final DataInput in) throws IOException {
this.pageSize = in.readLong();
}
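
PageRowFilter keeps its paging state in rowProcessed: only rows the scanner finally accepts bump the count, after which filterAllRemaining trips and the scan can stop early. A standalone sketch of that latch, assuming a simple count-versus-pageSize threshold (the exact check is not shown in this hunk):

    public class PageLatchDemo {
      private final long pageSize;
      private long rowsAccepted;

      PageLatchDemo(long pageSize) { this.pageSize = pageSize; }

      // The scanner reports the final verdict on each row; only rows it
      // actually accepted count toward the page.
      void rowProcessed(boolean filtered) {
        if (!filtered) rowsAccepted++;
      }

      boolean filterAllRemaining() {
        return rowsAccepted > pageSize;
      }

      public static void main(String[] args) {
        PageLatchDemo page = new PageLatchDemo(2);
        page.rowProcessed(false);
        page.rowProcessed(true);  // filtered row: does not count
        page.rowProcessed(false);
        System.out.println(page.filterAllRemaining()); // false: 2 accepted <= 2
      }
    }
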
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/PrefixRowFilter.java Sun Apr 12 10:39:55 2009
@@ -22,8 +22,10 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.util.List;
import java.util.SortedMap;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
@@ -52,6 +54,10 @@
}
public void rowProcessed(boolean filtered, byte [] key) {
+ rowProcessed(filtered, key, 0, key.length);
+ }
+
+ public void rowProcessed(boolean filtered, byte[] key, int offset, int length) {
// does not care
}
@@ -64,12 +70,17 @@
}
public boolean filterRowKey(final byte [] rowKey) {
+ return filterRowKey(rowKey, 0, rowKey.length);
+ }
+
+
+ public boolean filterRowKey(byte[] rowKey, int offset, int length) {
if (rowKey == null)
return true;
- if (rowKey.length < prefix.length)
+ if (length < prefix.length)
return true;
for(int i = 0;i < prefix.length;i++)
- if (prefix[i] != rowKey[i])
+ if (prefix[i] != rowKey[i + offset])
return true;
return false;
}
@@ -79,10 +90,20 @@
return false;
}
+ public boolean filterColumn(byte[] rowKey, int roffset, int rlength,
+ byte[] columnName, int coffset, int clength, byte[] columnValue,
+ int voffset, int vlength) {
+ return false;
+ }
+
public boolean filterRow(final SortedMap<byte [], Cell> columns) {
return false;
}
+ public boolean filterRow(List<KeyValue> results) {
+ return false;
+ }
+
public void validate(final byte [][] columns) {
// does not do this
}
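
The PrefixRowFilter hunk above is the one behavioral fix in this group: the prefix walk must index from offset, because with KeyValue-backed scans the row bytes can sit in the middle of a larger buffer. A standalone sketch of the corrected match (hypothetical names):

    public class PrefixMatchDemo {
      static boolean matchesPrefix(byte[] prefix, byte[] row, int offset, int length) {
        if (length < prefix.length) return false;
        for (int i = 0; i < prefix.length; i++) {
          if (prefix[i] != row[i + offset]) return false; // honor the slice start
        }
        return true;
      }

      public static void main(String[] args) {
        byte[] buf = "xxabc-row".getBytes();
        // The row bytes live at offset 2 inside a larger buffer, as with
        // a KeyValue-backed slice.
        System.out.println(matchesPrefix("abc".getBytes(), buf, 2, 7)); // true
      }
    }
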
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RegExpRowFilter.java Sun Apr 12 10:39:55 2009
@@ -23,6 +23,7 @@
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
@@ -31,6 +32,7 @@
import java.util.Map.Entry;
import java.util.regex.Pattern;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.HLogEdit;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
@@ -87,6 +89,11 @@
}
public void rowProcessed(boolean filtered, byte [] rowKey) {
+ rowProcessed(filtered, rowKey, 0, rowKey.length);
+ }
+
+
+ public void rowProcessed(boolean filtered, byte[] key, int offset, int length) {
//doesn't care
}
@@ -140,8 +147,12 @@
}
public boolean filterRowKey(final byte [] rowKey) {
+ return filterRowKey(rowKey, 0, rowKey.length);
+ }
+
+ public boolean filterRowKey(byte[] rowKey, int offset, int length) {
return (filtersByRowKey() && rowKey != null)?
- !getRowKeyPattern().matcher(Bytes.toString(rowKey)).matches():
+ !getRowKeyPattern().matcher(Bytes.toString(rowKey, offset, length)).matches():
false;
}
@@ -164,6 +175,14 @@
return false;
}
+
+ public boolean filterColumn(byte[] rowKey, int roffset, int rlength,
+ byte[] columnName, int coffset, int clength, byte[] columnValue,
+ int voffset, int vlength) {
+ if (true) throw new RuntimeException("Not implemented yet");
+ return false;
+ }
+
public boolean filterRow(final SortedMap<byte [], Cell> columns) {
for (Entry<byte [], Cell> col : columns.entrySet()) {
if (nullColumns.contains(col.getKey())
@@ -179,6 +198,11 @@
return false;
}
+ public boolean filterRow(List<KeyValue> results) {
+ if (true) throw new RuntimeException("NOT YET IMPLEMENTED");
+ return false;
+ }
+
@Deprecated
private boolean filtersByColumnValue() {
return equalsMap != null && equalsMap.size() > 0;
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterInterface.java Sun Apr 12 10:39:55 2009
@@ -19,19 +19,20 @@
*/
package org.apache.hadoop.hbase.filter;
+import java.util.List;
import java.util.SortedMap;
-import org.apache.hadoop.io.Writable;
-
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.io.Writable;
/**
*
* Interface used for row-level filters applied to HRegion.HScanner scan
* results during calls to next().
+ * TODO: Make Filters use proper comparator comparing rows.
*/
public interface RowFilterInterface extends Writable {
-
/**
* Resets the state of the filter. Used prior to the start of a Region scan.
*
@@ -48,10 +49,26 @@
* @see RowFilterSet
* @param filtered
* @param key
+ * @deprecated Use {@link #rowProcessed(boolean, byte[], int, int)} instead.
*/
void rowProcessed(boolean filtered, byte [] key);
/**
+ * Called to let filter know the final decision (to pass or filter) on a
+ * given row. Without HScanner calling this, the filter does not know if a
+ * row passed filtering even if it itself passed the row, because other
+ * filters may have failed the row. E.g. when this filter is a member of a
+ * RowFilterSet with an OR operator.
+ *
+ * @see RowFilterSet
+ * @param filtered
+ * @param key
+ * @param offset
+ * @param length
+ */
+ void rowProcessed(boolean filtered, byte [] key, int offset, int length);
+
+ /**
* Returns whether or not the filter should always be processed in any
* filtering call. This precaution is necessary for filters that maintain
* state and need to be updated according to their response to filtering
@@ -79,10 +96,21 @@
*
* @param rowKey
* @return true if given row key is filtered and row should not be processed.
+ * @deprecated Use {@link #filterRowKey(byte[], int, int)} instead.
*/
boolean filterRowKey(final byte [] rowKey);
/**
+ * Filters on just a row key. This is the first chance to stop a row.
+ *
+ * @param rowKey
+ * @param offset
+ * @param length
+ * @return true if given row key is filtered and row should not be processed.
+ */
+ boolean filterRowKey(final byte [] rowKey, final int offset, final int length);
+
+ /**
* Filters on row key, column name, and column value. This will take individual columns out of a row,
* but the rest of the row will still get through.
*
@@ -90,9 +118,25 @@
* @param colunmName column name to filter on
* @param columnValue column value to filter on
* @return true if row filtered and should not be processed.
+ * @deprecated Use {@link #filterColumn(byte[], int, int, byte[], int, int, byte[], int, int)}
+ * instead.
*/
- boolean filterColumn(final byte [] rowKey, final byte [] colunmName,
- final byte[] columnValue);
+ boolean filterColumn(final byte [] rowKey, final byte [] columnName,
+ final byte [] columnValue);
+
+ /**
+ * Filters on row key, column name, and column value. This will take individual columns out of a row,
+ * but the rest of the row will still get through.
+ *
+ * @param rowKey row key to filter on.
+ * @param roffset row key offset
+ * @param rlength row key length
+ * @param columnName column name to filter on
+ * @param coffset column name offset
+ * @param clength column name length
+ * @param columnValue column value to filter on
+ * @param voffset column value offset
+ * @param vlength column value length
+ * @return true if row filtered and should not be processed.
+ */
+ boolean filterColumn(final byte [] rowKey, final int roffset,
+ final int rlength, final byte [] columnName, final int coffset,
+ final int clength, final byte [] columnValue, final int voffset,
+ final int vlength);
/**
* Filter on the fully assembled row. This is the last chance to stop a row.
@@ -103,6 +147,14 @@
boolean filterRow(final SortedMap<byte [], Cell> columns);
/**
+ * Filter on the fully assembled row. This is the last chance to stop a row.
+ *
+ * @param results
+ * @return true if row filtered and should not be processed.
+ */
+ boolean filterRow(final List<KeyValue> results);
+
+ /**
* Validates that this filter applies only to a subset of the given columns.
* This check is done prior to opening of scanner due to the limitation that
* filtering of columns is dependent on the retrieval of those columns within
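
Why rowProcessed carries the final verdict: a member of a MUST_PASS_ONE set can reject a row that the set as a whole accepts, so a stateful member must not trust its own vote. A standalone sketch of that flow (simplified interface, not the HBase one):

    import java.util.Arrays;
    import java.util.List;

    public class RowProcessedDemo {
      interface MiniFilter {
        boolean filterRowKey(byte[] row);               // true = reject
        void rowProcessed(boolean filtered, byte[] row);
      }

      // MUST_PASS_ONE: the row survives if any member accepts it, but
      // every member is told the final outcome, not its own vote.
      static boolean scanRow(byte[] row, List<MiniFilter> set) {
        boolean filtered = true;
        for (MiniFilter f : set) {
          if (!f.filterRowKey(row)) filtered = false;
        }
        for (MiniFilter f : set) {
          f.rowProcessed(filtered, row);
        }
        return filtered;
      }

      public static void main(String[] args) {
        MiniFilter pager = new MiniFilter() {
          public boolean filterRowKey(byte[] row) { return true; } // votes reject
          public void rowProcessed(boolean filtered, byte[] row) {
            // A paging filter must count on `filtered`: the row passed
            // because the other member accepted it.
            System.out.println("row filtered? " + filtered);
          }
        };
        MiniFilter acceptAll = new MiniFilter() {
          public boolean filterRowKey(byte[] row) { return false; }
          public void rowProcessed(boolean filtered, byte[] row) {}
        };
        scanRow("r1".getBytes(), Arrays.asList(pager, acceptAll)); // prints: row filtered? false
      }
    }
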
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/RowFilterSet.java Sun Apr 12 10:39:55 2009
@@ -23,11 +23,13 @@
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import java.util.SortedMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.io.ObjectWritable;
@@ -117,8 +119,12 @@
}
public void rowProcessed(boolean filtered, byte [] rowKey) {
+ rowProcessed(filtered, rowKey, 0, rowKey.length);
+ }
+
+ public void rowProcessed(boolean filtered, byte[] key, int offset, int length) {
for (RowFilterInterface filter : filters) {
- filter.rowProcessed(filtered, rowKey);
+ filter.rowProcessed(filtered, key, offset, length);
}
}
@@ -148,23 +154,30 @@
}
public boolean filterRowKey(final byte [] rowKey) {
+ return filterRowKey(rowKey, 0, rowKey.length);
+ }
+
+
+ public boolean filterRowKey(byte[] rowKey, int offset, int length) {
boolean resultFound = false;
boolean result = operator == Operator.MUST_PASS_ONE;
for (RowFilterInterface filter : filters) {
if (!resultFound) {
if (operator == Operator.MUST_PASS_ALL) {
- if (filter.filterAllRemaining() || filter.filterRowKey(rowKey)) {
+ if (filter.filterAllRemaining() ||
+ filter.filterRowKey(rowKey, offset, length)) {
result = true;
resultFound = true;
}
} else if (operator == Operator.MUST_PASS_ONE) {
- if (!filter.filterAllRemaining() && !filter.filterRowKey(rowKey)) {
+ if (!filter.filterAllRemaining() &&
+ !filter.filterRowKey(rowKey, offset, length)) {
result = false;
resultFound = true;
}
}
} else if (filter.processAlways()) {
- filter.filterRowKey(rowKey);
+ filter.filterRowKey(rowKey, offset, length);
}
}
return result;
@@ -172,25 +185,35 @@
public boolean filterColumn(final byte [] rowKey, final byte [] colKey,
final byte[] data) {
+ return filterColumn(rowKey, 0, rowKey.length, colKey, 0, colKey.length,
+ data, 0, data.length);
+ }
+
+ public boolean filterColumn(byte[] rowKey, int roffset, int rlength,
+ byte[] columnName, int coffset, int clength, byte[] columnValue,
+ int voffset, int vlength) {
boolean resultFound = false;
boolean result = operator == Operator.MUST_PASS_ONE;
for (RowFilterInterface filter : filters) {
if (!resultFound) {
if (operator == Operator.MUST_PASS_ALL) {
if (filter.filterAllRemaining() ||
- filter.filterColumn(rowKey, colKey, data)) {
+ filter.filterColumn(rowKey, roffset, rlength, columnName, coffset,
+ clength, columnValue, voffset, vlength)) {
result = true;
resultFound = true;
}
} else if (operator == Operator.MUST_PASS_ONE) {
if (!filter.filterAllRemaining() &&
- !filter.filterColumn(rowKey, colKey, data)) {
+ !filter.filterColumn(rowKey, roffset, rlength, columnName, coffset,
+ clength, columnValue, voffset, vlength)) {
result = false;
resultFound = true;
}
}
} else if (filter.processAlways()) {
- filter.filterColumn(rowKey, colKey, data);
+ filter.filterColumn(rowKey, roffset, rlength, columnName, coffset,
+ clength, columnValue, voffset, vlength);
}
}
return result;
@@ -219,6 +242,11 @@
return result;
}
+ public boolean filterRow(List<KeyValue> results) {
+ if (true) throw new RuntimeException("Not Yet Implemented");
+ return false;
+ }
+
public void readFields(final DataInput in) throws IOException {
Configuration conf = new HBaseConfiguration();
byte opByte = in.readByte();
@@ -242,5 +270,4 @@
ObjectWritable.writeObject(out, filter, RowFilterInterface.class, conf);
}
}
-
-}
+}
\ No newline at end of file
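
From the client side, a hedged usage sketch of the set; the constructors used here (RowFilterSet(Operator, Set), StopRowFilter(byte[]), PageRowFilter(long)) are assumptions based on how these classes appear elsewhere in this patch — verify against your checkout:

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.hbase.filter.PageRowFilter;
    import org.apache.hadoop.hbase.filter.RowFilterInterface;
    import org.apache.hadoop.hbase.filter.RowFilterSet;
    import org.apache.hadoop.hbase.filter.StopRowFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FilterSetUsage {
      public static void main(String[] args) {
        Set<RowFilterInterface> filters = new HashSet<RowFilterInterface>();
        filters.add(new StopRowFilter(Bytes.toBytes("row-100")));
        filters.add(new PageRowFilter(10));
        // MUST_PASS_ALL: a row survives only if every member accepts it;
        // both members see every filterRowKey/rowProcessed call per the
        // loops above.
        RowFilterInterface set =
          new RowFilterSet(RowFilterSet.Operator.MUST_PASS_ALL, filters);
        // Hand `set` to the scanner as its filter.
      }
    }
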
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/StopRowFilter.java Sun Apr 12 10:39:55 2009
@@ -22,8 +22,10 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.util.List;
import java.util.SortedMap;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes;
@@ -32,7 +34,6 @@
* equal to a specified rowKey.
*/
public class StopRowFilter implements RowFilterInterface {
-
private byte [] stopRowKey;
/**
@@ -73,6 +74,10 @@
// Doesn't care
}
+ public void rowProcessed(boolean filtered, byte[] key, int offset, int length) {
+ // Doesn't care
+ }
+
public boolean processAlways() {
return false;
}
@@ -82,6 +87,10 @@
}
public boolean filterRowKey(final byte [] rowKey) {
+ return filterRowKey(rowKey, 0, rowKey.length);
+ }
+
+ public boolean filterRowKey(byte[] rowKey, int offset, int length) {
if (rowKey == null) {
if (this.stopRowKey == null) {
return true;
@@ -104,6 +113,12 @@
return filterRowKey(rowKey);
}
+ public boolean filterColumn(byte[] rowKey, int roffset, int rlength,
+ byte[] columnName, int coffset, int clength, byte[] columnValue,
+ int voffset, int vlength) {
+ return filterRowKey(rowKey, roffset, rlength);
+ }
+
/**
* Because StopRowFilter does not examine column information, this method
* defaults to calling filterAllRemaining().
@@ -114,6 +129,10 @@
return filterAllRemaining();
}
+ public boolean filterRow(List<KeyValue> results) {
+ return filterAllRemaining();
+ }
+
public void readFields(DataInput in) throws IOException {
this.stopRowKey = Bytes.readByteArray(in);
}
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java?rev=764289&r1=764288&r2=764289&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/filter/WhileMatchRowFilter.java Sun Apr 12 10:39:55 2009
@@ -22,8 +22,10 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import java.util.List;
import java.util.SortedMap;
+import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.Cell;
/**
@@ -34,7 +36,6 @@
* thereafter defer to the result of filterAllRemaining().
*/
public class WhileMatchRowFilter implements RowFilterInterface {
-
private boolean filterAllRemaining = false;
private RowFilterInterface filter;
@@ -84,10 +85,15 @@
}
public boolean filterRowKey(final byte [] rowKey) {
- changeFAR(this.filter.filterRowKey(rowKey));
+ changeFAR(this.filter.filterRowKey(rowKey, 0, rowKey.length));
return filterAllRemaining();
}
-
+
+ public boolean filterRowKey(byte[] rowKey, int offset, int length) {
+ changeFAR(this.filter.filterRowKey(rowKey, offset, length));
+ return filterAllRemaining();
+ }
+
public boolean filterColumn(final byte [] rowKey, final byte [] colKey,
final byte[] data) {
changeFAR(this.filter.filterColumn(rowKey, colKey, data));
@@ -98,7 +104,12 @@
changeFAR(this.filter.filterRow(columns));
return filterAllRemaining();
}
-
+
+ public boolean filterRow(List<KeyValue> results) {
+ changeFAR(this.filter.filterRow(results));
+ return filterAllRemaining();
+ }
+
/**
* Change filterAllRemaining from false to true if value is true, otherwise
* leave as is.
@@ -110,7 +121,11 @@
}
public void rowProcessed(boolean filtered, byte [] rowKey) {
- this.filter.rowProcessed(filtered, rowKey);
+ this.filter.rowProcessed(filtered, rowKey, 0, rowKey.length);
+ }
+
+ public void rowProcessed(boolean filtered, byte[] key, int offset, int length) {
+ this.filter.rowProcessed(filtered, key, offset, length);
}
public void validate(final byte [][] columns) {
@@ -140,4 +155,11 @@
out.writeUTF(this.filter.getClass().getName());
this.filter.write(out);
}
+
+ public boolean filterColumn(byte[] rowKey, int roffset, int rlength,
+ byte[] columnName, int coffset, int clength, byte[] columnValue,
+ int voffset, int vlength) {
+ changeFAR(this.filter.filterColumn(rowKey, roffset, rlength, columnName,
+ coffset, clength, columnValue, voffset, vlength));
+ return filterAllRemaining();
+ }
}
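
WhileMatchRowFilter's changeFAR latch makes the wrapped filter terminal: the first rejection flips filterAllRemaining for good. A hedged usage sketch, assuming the single-argument constructor and the greater-than-or-equal StopRowFilter semantics described above:

    import org.apache.hadoop.hbase.filter.RowFilterInterface;
    import org.apache.hadoop.hbase.filter.StopRowFilter;
    import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WhileMatchUsage {
      public static void main(String[] args) {
        RowFilterInterface f =
          new WhileMatchRowFilter(new StopRowFilter(Bytes.toBytes("row-500")));
        f.filterRowKey(Bytes.toBytes("row-400"), 0, 7); // passes; latch stays off
        f.filterRowKey(Bytes.toBytes("row-600"), 0, 7); // >= stop row; latch set
        System.out.println(f.filterAllRemaining());     // true
      }
    }
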