Posted to commits@hbase.apache.org by ap...@apache.org on 2010/01/16 02:52:31 UTC
svn commit: r899871 - in /hadoop/hbase/branches/0.20_on_hadoop-0.18.3: ./
conf/ src/contrib/ec2/ src/contrib/ec2/bin/ src/contrib/indexed/
src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/
src/contrib/transactional/src/java/org/apache/...
Author: apurtell
Date: Sat Jan 16 01:52:29 2010
New Revision: 899871
URL: http://svn.apache.org/viewvc?rev=899871&view=rev
Log:
pull up to 0.20.3-dev r899869
Added:
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/webapps/static/scripts/
Modified:
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/NOTICE.txt
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/build.xml
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/build.xml
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/IdxRegion.java
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/CleanOldTransactionsChore.java
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/CHANGES.txt Sat Jan 16 01:52:29 2010
@@ -42,7 +42,22 @@
HBASE-2094 hbase-2037 breaks mapreduce jobs going from 0.20.2 to 0.20.3
HBASE-2093 [stargate] RowSpec parse bug (Andrew Purtell via JD)
HBASE-2097 Deadlock between HRegion.put and HRegion.close (Stack via JD)
- HBASE-2100 [EC2] Adjust fs.file-max
+ HBASE-2101 KeyValueSortReducer collapses all values to last passed
+ HBASE-2113 For indexed contrib, fast-forward to next row if no more
+ results left... big performance improvement
+ HBASE-2112 New 'indexed' contrib is missing commons-lang.jar when packaged
+ HBASE-2119 Fix top-level NOTICES.txt file. It's stale.
+ HBASE-2120 [stargate] Unable to delete column families (Greg Lu via Andrew
+ Purtell)
+ HBASE-2123 Remove 'master' command-line option from PE
+ HBASE-2024 [stargate] Deletes not working as expected (Greg Lu via Andrew
+ Purtell)
+ HBASE-2122 [stargate] Initializing scanner column families doesn't work
+ (Greg Lu via Andrew Purtell)
+ HBASE-2124 Useless exception in HMaster on start
+ HBASE-2127 randomWrite mode of PerformanceEvaluation benchmark program
+ writes only to a small range of keys (Kannan Muthukkaruppan
+ via Stack)
IMPROVEMENTS
HBASE-1970 Export does one version only; make it configurable how many
@@ -52,8 +67,6 @@
(Jeremiah Jacquet via Stack)
HBASE-1987 The Put object has no simple read methods for checking what
has already been added (Ryan Smith via Stack)
- HBASE-2009 [EC2] Support mapreduce
- HBASE-2012 [EC2] LZO support
HBASE-2011 Add zktop like output to HBase's master UI (Lars George via
Andrew Purtell)
HBASE-2018 Updates to .META. blocked under high MemStore load
@@ -70,8 +83,6 @@
Andrew Purtell)
HBASE-2028 Add HTable.incrementColumnValue support to shell (Lars George
via Andrew Purtell)
- HBASE-1982 [EC2] Handle potentially large and uneven instance startup
- times
HBASE-2062 Metrics documentation outdated (Lars George via JD)
HBASE-2045 Update trunk and branch zk to just-released 3.2.2.
HBASE-2074 Improvements to the hadoop-config script (Bassam Tabbara via
@@ -82,9 +93,6 @@
UndeclaredThrowableException; frustrates rolling upgrade
HBASE-2081 Set the retries higher in shell since client pause is lower
HBASE-1956 Export HDFS read and write latency as a metric
- HBASE-2080 [EC2] Support multivolume local instance storage
- HBASE-2083 [EC2] HDFS DataNode no longer required on master
- HBASE-2084 [EC2] JAVA_HOME handling broken
HBASE-2053 Upper bound of outstanding WALs can be overrun
HBASE-1996 Configure scanner buffer in bytes instead of number of rows
(Erik Rozendaal and Dave Latham via Stack)
@@ -92,9 +100,18 @@
(Lars George via Stack)
HBASE-2095 TIF should support more confs for the scanner (Bassam Tabbara
via Andrew Purtell)
+ HBASE-2133 Increase default number of client handlers
NEW FEATURES
HBASE-1961 HBase EC2 scripts
+ HBASE-1982 [EC2] Handle potentially large and uneven instance startup times
+ HBASE-2009 [EC2] Support mapreduce
+ HBASE-2012 [EC2] LZO support
+ HBASE-2080 [EC2] Support multivolume local instance storage
+ HBASE-2083 [EC2] HDFS DataNode no longer required on master
+ HBASE-2084 [EC2] JAVA_HOME handling broken
+ HBASE-2100 [EC2] Adjust fs.file-max
+ HBASE-2103 [EC2] pull version from build
HBASE-2037 Alternate indexed hbase implementation; speeds scans by adding
indexes to regions rather than secondary tables
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/NOTICE.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/NOTICE.txt?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/NOTICE.txt (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/NOTICE.txt Sat Jan 16 01:52:29 2010
@@ -3,13 +3,8 @@
In addition, this product includes software developed by:
-
-European Commission project OneLab (http://www.one-lab.org)
-
-
Facebook, Inc. (http://developers.facebook.com/thrift/ -- Page includes the Thrift Software License)
-
JUnit (http://www.junit.org/)
The JSON jar source is here: http://www.json.org/java/index.html
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/conf/hbase-default.xml Sat Jan 16 01:52:29 2010
@@ -153,10 +153,10 @@
</property>
<property>
<name>hbase.regionserver.handler.count</name>
- <value>10</value>
+ <value>25</value>
<description>Count of RPC Server instances spun up on RegionServers
Same property is used by the HMaster for count of master handlers.
- Default is 10.
+ Default is 25.
</description>
</property>
<property>
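
This hunk raises the default RPC handler count from 10 to 25 (HBASE-2133). A minimal sketch of how a process would read the new default, assuming the 0.20-era HBaseConfiguration API; the class name is made up for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HandlerCountExample {
      public static void main(String[] args) {
        // hbase-default.xml now ships 25; hbase-site.xml can still
        // override it for workloads with many concurrent clients.
        HBaseConfiguration conf = new HBaseConfiguration();
        int handlers = conf.getInt("hbase.regionserver.handler.count", 25);
        System.out.println("RPC handler count: " + handlers);
      }
    }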
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-env.sh Sat Jan 16 01:52:29 2010
@@ -101,7 +101,7 @@
USER_DATA_FILE=hbase-ec2-init-remote.sh
# The version number of the installed JDK.
-JAVA_VERSION=1.6.0_17
+JAVA_VERSION=1.6.0_18
# SUPPORTED_ARCHITECTURES = ['i386', 'x86_64']
if [ "$SLAVE_INSTANCE_TYPE" = "m1.small" -o "$SLAVE_INSTANCE_TYPE" = "c1.medium" ]; then
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/bin/hbase-ec2-init-remote.sh Sat Jan 16 01:52:29 2010
@@ -83,7 +83,7 @@
# Hadoop configuration
-cat > $HADOOP_HOME/conf/core-site.xml <<EOF
+cat > $HADOOP_HOME/conf/hadoop-site.xml <<EOF
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
@@ -95,16 +95,6 @@
<name>fs.default.name</name>
<value>hdfs://$MASTER_HOST:8020</value>
</property>
-</configuration>
-EOF
-cat > $HADOOP_HOME/conf/hdfs-site.xml <<EOF
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
-<property>
- <name>fs.default.name</name>
- <value>hdfs://$MASTER_HOST:8020</value>
-</property>
<property>
<name>dfs.name.dir</name>
<value>$DFS_NAME_DIR</value>
@@ -113,12 +103,6 @@
<name>dfs.data.dir</name>
<value>$DFS_DATA_DIR</value>
</property>
-</configuration>
-EOF
-cat > $HADOOP_HOME/conf/mapred-site.xml <<EOF
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<configuration>
<property>
<name>mapred.job.tracker</name>
<value>$MASTER_HOST:8021</value>
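
These hunks fold the split core-site.xml/hdfs-site.xml/mapred-site.xml files back into a single hadoop-site.xml because this branch runs on Hadoop 0.18.3, where Configuration only loads hadoop-default.xml and hadoop-site.xml from the classpath; the 0.20-style split files would be ignored. A quick sketch to verify the generated file is picked up (class name is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class SiteConfigCheck {
      public static void main(String[] args) {
        // On Hadoop 0.18.x this loads hadoop-default.xml, then
        // hadoop-site.xml, from the classpath.
        Configuration conf = new Configuration();
        System.out.println("fs.default.name    = " + conf.get("fs.default.name"));
        System.out.println("mapred.job.tracker = " + conf.get("mapred.job.tracker"));
      }
    }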
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/build.xml?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/build.xml (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/ec2/build.xml Sat Jan 16 01:52:29 2010
@@ -19,6 +19,22 @@
<copy todir="${dist.dir}/contrib/${name}">
<fileset dir="${build.dir}"/>
</copy>
+ <exec executable="chmod">
+ <arg value="755"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/hbase-ec2"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/cmd-hbase-cluster"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/create-hbase-image"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/delete-hbase-cluster"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/init-hbase-cluster-secgroups"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/launch-hbase-cluster"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/launch-hbase-master"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/launch-hbase-slaves"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/launch-hbase-zookeeper"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/list-hbase-clusters"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/revoke-hbase-cluster-secgroups"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/terminate-hbase-cluster"/>
+ <arg value="${dist.dir}/contrib/${name}/bin/image/create-hbase-image-remote"/>
+ </exec>
</target>
</project>
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/build.xml
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/build.xml?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/build.xml (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/build.xml Sat Jan 16 01:52:29 2010
@@ -23,5 +23,23 @@
-->
<project name="indexed" default="jar">
<import file="../build-contrib.xml"/>
-
+
+ <property name="lib.dir" value="${basedir}/lib"/>
+
+ <!--Override ../build-contrib.xml package-->
+ <target name="package" depends="jar" unless="skip.contrib">
+ <mkdir dir="${dist.dir}/contrib/${name}"/>
+ <copy todir="${dist.dir}/contrib/${name}" includeEmptyDirs="false" flatten="true">
+ <fileset dir="${build.dir}">
+ <include name="hbase-${version}-${name}.jar" />
+ </fileset>
+ </copy>
+ <mkdir dir="${dist.dir}/contrib/${name}/lib"/>
+ <copy todir="${dist.dir}/contrib/${name}/lib" overwrite="true">
+ <fileset dir="${lib.dir}">
+ <include name="commons-lang-*.jar" />
+ </fileset>
+ </copy>
+ </target>
+
</project>
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/IdxRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/IdxRegion.java?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/IdxRegion.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/indexed/src/java/org/apache/hadoop/hbase/regionserver/IdxRegion.java Sat Jan 16 01:52:29 2010
@@ -287,6 +287,17 @@
return result;
}
+ /**
+ * {@inheritDoc}
+ * <p/>
+ * Fast forwards the scanner by calling {@link #seekNext()}.
+ */
+ @Override
+ protected void nextRow(byte[] currentRow) throws IOException {
+ seekNext();
+ super.nextRow(currentRow);
+ }
+
protected void seekNext() throws IOException {
KeyValue keyValue;
do {
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/CleanOldTransactionsChore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/CleanOldTransactionsChore.java?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/CleanOldTransactionsChore.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/CleanOldTransactionsChore.java Sat Jan 16 01:52:29 2010
@@ -21,6 +21,7 @@
import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -42,8 +43,8 @@
public CleanOldTransactionsChore(
final TransactionalRegionServer regionServer,
final AtomicBoolean stopRequest) {
- super(regionServer.getConfiguration().getInt(SLEEP_CONF, DEFAULT_SLEEP),
- stopRequest);
+ super(((Configuration)regionServer.getConfiguration())
+ .getInt(SLEEP_CONF, DEFAULT_SLEEP), stopRequest);
this.regionServer = regionServer;
}
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/contrib/transactional/src/java/org/apache/hadoop/hbase/regionserver/transactional/TransactionalRegionServer.java Sat Jan 16 01:52:29 2010
@@ -24,6 +24,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
@@ -144,7 +145,7 @@
getTransactionalRegion(hri.getRegionName()).prepareToClose();
super.closeRegion(hri, reportWhenCompleted);
}
-
+
public void abort(final byte[] regionName, final long transactionId)
throws IOException {
checkOpen();
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sat Jan 16 01:52:29 2010
@@ -53,7 +53,10 @@
import java.io.IOException;
import java.io.UnsupportedEncodingException;
+ import java.lang.reflect.Constructor;
+ import java.util.AbstractList;
import java.util.ArrayList;
+ import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
@@ -64,9 +67,8 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
- import java.lang.reflect.Constructor;
- /**
+/**
* HRegion stores data for a certain region of a table. It stores all columns
* for each row. A given table consists of one or more HRegions.
*
@@ -1747,7 +1749,10 @@
this(scan, null);
}
- private void resetFilters() {
+ /**
+ * Reset both the filter and the old filter.
+ */
+ protected void resetFilters() {
if (filter != null) {
filter.reset();
}
@@ -1764,7 +1769,7 @@
}
results.clear();
boolean returnResult = nextInternal();
- if (!returnResult && filter != null && filter.filterRow()) {
+ if (!returnResult && filterRow()) {
results.clear();
}
outResults.addAll(results);
@@ -1779,71 +1784,78 @@
* @return True if a filter rules the scanner is over, done.
*/
boolean isFilterDone() {
- return this.filter != null && this.filter.filterAllRemaining();
+ return
+ (this.filter != null && this.filter.filterAllRemaining()) ||
+ (this.oldFilter != null && oldFilter.filterAllRemaining());
}
+
/*
- * @return true if there are more rows, false if scanner is done
- * @throws IOException
- */
+ * @return true if there are more rows, false if scanner is done
+ * @throws IOException
+ */
private boolean nextInternal() throws IOException {
- byte [] currentRow = null;
- boolean filterCurrentRow = false;
while (true) {
- KeyValue kv = this.storeHeap.peek();
- if (kv == null) return false;
- byte [] row = kv.getRow();
- boolean samerow = Bytes.equals(currentRow, row);
- if (samerow && filterCurrentRow) {
- // Filter all columns until row changes
- readAndDumpCurrentResult();
- continue;
- }
- if (!samerow) {
- // Continue on the next row:
- currentRow = row;
- filterCurrentRow = false;
- // See if we passed stopRow
- if (this.stopRow != null &&
- comparator.compareRows(this.stopRow, 0, this.stopRow.length,
- currentRow, 0, currentRow.length) <= 0) {
- return false;
+ byte[] currentRow = peekRow();
+ if (isStopRow(currentRow)) {
+ return false;
+ } else if (filterRowKey(currentRow)) {
+ nextRow(currentRow);
+ } else {
+ byte[] nextRow;
+ do {
+ this.storeHeap.next(results);
+ } while (Bytes.equals(currentRow, nextRow = peekRow()));
+
+ final boolean stopRow = isStopRow(nextRow);
+ if (!stopRow && (results.isEmpty() || filterRow())) {
// this seems like a redundant step - we already consumed the row;
// there are no leftovers.
+ // the reasons for calling this method are:
+ // 1. reset the filters.
+ // 2. provide a hook to fast forward the row (used by subclasses)
+ nextRow(currentRow);
+ continue;
}
- if (hasResults()) return true;
- }
- // See if current row should be filtered based on row key
- if ((this.filter != null && this.filter.filterRowKey(row, 0, row.length)) ||
- (oldFilter != null && this.oldFilter.filterRowKey(row, 0, row.length))) {
- readAndDumpCurrentResult();
- resetFilters();
- filterCurrentRow = true;
- currentRow = row;
- continue;
+ return !stopRow;
}
- this.storeHeap.next(results);
}
}
- private void readAndDumpCurrentResult() throws IOException {
- this.storeHeap.next(this.results);
- this.results.clear();
- }
-
- /*
- * Do we have results to return or should we continue. Call when we get to
- * the end of a row. Does house cleaning -- clearing results and resetting
- * filters -- if we are to continue.
- * @return True if we should return else false if need to keep going.
+ /**
+ * Reset state and move to the next row.
+ *
+ * @param currentRow the current row
+ * @throws IOException by store heap
*/
- private boolean hasResults() {
- if (this.results.isEmpty() ||
- this.filter != null && this.filter.filterRow()) {
- // Make sure results is empty, reset filters
- this.results.clear();
- resetFilters();
- return false;
+ protected void nextRow(byte[] currentRow) throws IOException {
+ while (Bytes.equals(currentRow, peekRow())) {
+ this.storeHeap.next(MOCKED_LIST);
}
- return true;
+ results.clear();
+ resetFilters();
+ }
+
+ private boolean isStopRow(byte[] currentRow) {
+ return currentRow == null ||
+ (this.stopRow != null &&
+ comparator.compareRows(this.stopRow, 0, this.stopRow.length,
+ currentRow, 0, currentRow.length) <= 0);
+ }
+
+ private boolean filterRow() {
+ return (filter != null && filter.filterRow()) ||
+ oldFilter != null && oldFilter.filterRow(results);
+ }
+
+ private byte[] peekRow() {
+ KeyValue kv = this.storeHeap.peek();
+ return kv == null ? null : kv.getRow();
+ }
+
+ private boolean filterRowKey(byte[] row) {
+ return (this.filter != null && this.filter.filterRowKey(row, 0, row.length)) ||
+ (oldFilter != null && this.oldFilter.filterRowKey(row, 0, row.length));
}
public void close() {
@@ -2617,4 +2629,31 @@
if (bc != null) bc.shutdown();
}
}
-}
+
+ /**
+ * A mocked list implementation - discards all updates.
+ */
+ private static final List<KeyValue> MOCKED_LIST = new AbstractList<KeyValue>() {
+
+ @Override
+ public void add(int index, KeyValue element) {
+ // do nothing
+ }
+
+ @Override
+ public boolean addAll(int index, Collection<? extends KeyValue> c) {
+ return false; // this list is never changed as a result of an update
+ }
+
+ @Override
+ public KeyValue get(int index) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int size() {
+ return 0;
+ }
+ };
+
+ }
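
The rewrite above restructures nextInternal() around small predicates (peekRow, isStopRow, filterRowKey, filterRow), adds the protected nextRow() hook that subclasses such as IdxRegion override to fast-forward (HBASE-2113), and uses MOCKED_LIST as a null-object sink so skipped rows are never buffered. Below is a standalone, illustrative-only sketch of the same control flow with the HBase types stubbed out: CellSource is a hypothetical stand-in for the KeyValueHeap, rows and cells are plain byte arrays, and the filter methods are trivial here.

    import java.util.AbstractList;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.List;

    public class ScanLoopSketch {

      interface CellSource {
        byte[] peekRow();              // row key of the next cell, or null
        void next(List<byte[]> out);   // consume one cell, appending to out
      }

      // Null-object sink, mirroring MOCKED_LIST: lets the source advance
      // past cells we are about to discard without buffering them.
      private static final List<byte[]> DISCARD = new AbstractList<byte[]>() {
        @Override public void add(int i, byte[] e) { /* drop */ }
        @Override public boolean addAll(int i, Collection<? extends byte[]> c) { return false; }
        @Override public byte[] get(int i) { throw new UnsupportedOperationException(); }
        @Override public int size() { return 0; }
      };

      private final CellSource heap;
      private final byte[] stopRow;
      private final List<byte[]> results = new ArrayList<byte[]>();

      ScanLoopSketch(CellSource heap, byte[] stopRow) {
        this.heap = heap;
        this.stopRow = stopRow;
      }

      // Same shape as the rewritten nextInternal(): peek the row, stop or
      // skip it wholesale, otherwise drain it and post-filter.
      boolean next(List<byte[]> outResults) {
        while (true) {
          byte[] currentRow = heap.peekRow();
          if (isStopRow(currentRow)) {
            return false;
          }
          if (filterRowKey(currentRow)) {
            nextRow(currentRow);
            continue;
          }
          byte[] nxt;
          do {
            heap.next(results);
          } while (Arrays.equals(currentRow, nxt = heap.peekRow()));
          boolean stop = isStopRow(nxt);
          if (!stop && (results.isEmpty() || filterRow())) {
            nextRow(currentRow); // reset filters; subclass fast-forward hook
            continue;
          }
          outResults.addAll(results);
          results.clear();
          return !stop;
        }
      }

      // Skip the remainder of currentRow without materializing its cells.
      protected void nextRow(byte[] currentRow) {
        while (Arrays.equals(currentRow, heap.peekRow())) {
          heap.next(DISCARD);
        }
        results.clear();
        resetFilters();
      }

      private boolean isStopRow(byte[] row) {
        return row == null || (stopRow != null && compare(stopRow, row) <= 0);
      }

      // Stand-ins for the real filter plumbing.
      private boolean filterRow() { return false; }
      private boolean filterRowKey(byte[] row) { return false; }
      private void resetFilters() { }

      private static int compare(byte[] a, byte[] b) {
        for (int i = 0; i < Math.min(a.length, b.length); i++) {
          int d = (a[i] & 0xff) - (b[i] & 0xff);
          if (d != 0) return d;
        }
        return a.length - b.length;
      }

      public static void main(String[] args) {
        final byte[][] rows = { "a".getBytes(), "a".getBytes(), "b".getBytes() };
        CellSource src = new CellSource() {
          private int i = 0;
          public byte[] peekRow() { return i < rows.length ? rows[i] : null; }
          public void next(List<byte[]> out) { out.add(rows[i++]); }
        };
        ScanLoopSketch scanner = new ScanLoopSketch(src, null);
        List<byte[]> out = new ArrayList<byte[]>();
        boolean more;
        do {
          more = scanner.next(out);
          if (!out.isEmpty()) {
            System.out.println("row " + new String(out.get(0)) + ": " + out.size() + " cell(s)");
          }
          out.clear();
        } while (more);
      }
    }

Running the sketch prints one line per row ("row a: 2 cell(s)", then "row b: 1 cell(s)"), matching the contract that next() returns false once the source is exhausted or the stop row is reached.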
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/metrics/RegionServerMetrics.java Sat Jan 16 01:52:29 2010
@@ -22,7 +22,9 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.metrics.MetricsRate;
+import org.apache.hadoop.hbase.regionserver.HLog;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
@@ -155,18 +157,30 @@
this.memstoreSizeMB.pushMetric(this.metricsRecord);
this.regions.pushMetric(this.metricsRecord);
this.requests.pushMetric(this.metricsRecord);
-
+ this.compactionQueueSize.pushMetric(this.metricsRecord);
this.blockCacheSize.pushMetric(this.metricsRecord);
this.blockCacheFree.pushMetric(this.metricsRecord);
this.blockCacheCount.pushMetric(this.metricsRecord);
this.blockCacheHitRatio.pushMetric(this.metricsRecord);
+ // mix in HFile metrics
+ this.fsReadLatency.inc((int)HFile.getReadOps(), HFile.getReadTime());
+ this.fsWriteLatency.inc((int)HFile.getWriteOps(), HFile.getWriteTime());
+ // mix in HLog metrics
+ this.fsWriteLatency.inc((int)HLog.getWriteOps(), HLog.getWriteTime());
+ this.fsSyncLatency.inc((int)HLog.getSyncOps(), HLog.getSyncTime());
+ // push the result
+ this.fsReadLatency.pushMetric(this.metricsRecord);
+ this.fsWriteLatency.pushMetric(this.metricsRecord);
+ this.fsSyncLatency.pushMetric(this.metricsRecord);
}
this.metricsRecord.update();
this.lastUpdate = System.currentTimeMillis();
}
-
+
public void resetAllMinMax() {
- // Nothing to do
+ this.atomicIncrementTime.resetMinMax();
+ this.fsReadLatency.resetMinMax();
+ this.fsWriteLatency.resetMinMax();
}
/**
@@ -202,6 +216,8 @@
Integer.valueOf(this.storefileIndexSizeMB.get()));
sb = Strings.appendKeyValue(sb, "memstoreSize",
Integer.valueOf(this.memstoreSizeMB.get()));
+ sb = Strings.appendKeyValue(sb, "compactionQueueSize",
+ Integer.valueOf(this.compactionQueueSize.get()));
// Duplicate from jvmmetrics because metrics are private there so
// inaccessible.
MemoryUsage memory =
@@ -218,6 +234,13 @@
Long.valueOf(this.blockCacheCount.get()));
sb = Strings.appendKeyValue(sb, "blockCacheHitRatio",
Long.valueOf(this.blockCacheHitRatio.get()));
+ sb = Strings.appendKeyValue(sb, "fsReadLatency",
+ Long.valueOf(this.fsReadLatency.getPreviousIntervalAverageTime()));
+ sb = Strings.appendKeyValue(sb, "fsWriteLatency",
+ Long.valueOf(this.fsWriteLatency.getPreviousIntervalAverageTime()));
+ sb = Strings.appendKeyValue(sb, "fsSyncLatency",
+ Long.valueOf(this.fsSyncLatency.getPreviousIntervalAverageTime()));
+
return sb.toString();
}
-}
+}
\ No newline at end of file
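
The metrics change accumulates the static read/write/sync counters kept by HFile and HLog into time-varying rates before each push; note that HFile and HLog writes are mixed into the same fsWriteLatency metric. A hedged sketch of the accumulate-then-push pattern, using a hypothetical RateMetric in place of Hadoop's MetricsTimeVaryingRate:

    // RateMetric is a hypothetical stand-in for Hadoop's
    // MetricsTimeVaryingRate: callers inc(numOps, totalTime) from any
    // source, and each push snapshots the interval average and resets.
    public class LatencyMetricsSketch {

      static class RateMetric {
        private int ops;
        private long time;
        private long prevAvg;

        synchronized void inc(int numOps, long totalTime) {
          ops += numOps;
          time += totalTime;
        }

        synchronized void pushMetric() {
          prevAvg = ops == 0 ? 0 : time / ops;
          ops = 0;
          time = 0;
        }

        synchronized long getPreviousIntervalAverageTime() {
          return prevAvg;
        }
      }

      public static void main(String[] args) {
        RateMetric fsWriteLatency = new RateMetric();
        // Mix two sources into one rate, as the commit does with HFile
        // and HLog write ops (the numbers here are made up):
        fsWriteLatency.inc(10, 500);  // e.g. HFile write ops / time
        fsWriteLatency.inc(5, 100);   // e.g. HLog write ops / time
        fsWriteLatency.pushMetric();
        System.out.println("avg fs write latency: "
            + fsWriteLatency.getPreviousIntervalAverageTime() + " ms");
      }
    }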
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWrapper.java Sat Jan 16 01:52:29 2010
@@ -379,7 +379,7 @@
try {
return readAddressOrThrow(znode, watcher);
} catch (IOException e) {
- LOG.debug("readAddress " +znode, e);
+ LOG.debug("Failed to read: " + e.getMessage());
return null;
}
}
Modified: hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java?rev=899871&r1=899870&r2=899871&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java (original)
+++ hadoop/hbase/branches/0.20_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/regionserver/TestScanner.java Sat Jan 16 01:52:29 2010
@@ -19,12 +19,6 @@
*/
package org.apache.hadoop.hbase.regionserver;
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.dfs.MiniDFSCluster;
@@ -39,18 +33,26 @@
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.InclusiveStopFilter;
import org.apache.hadoop.hbase.filter.InclusiveStopRowFilter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.PrefixRowFilter;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
/**
* Test of a long-lived scanner validating as we go.
*/
@@ -118,6 +120,7 @@
}
s.close();
assertEquals(0, count);
+ assertEquals(1, results.size());
// Now do something a bit more involved.
scan = new Scan(startrow, stoprow);
scan.addFamily(HConstants.CATALOG_FAMILY);
@@ -525,4 +528,43 @@
LOG.info("Found " + count + " items");
return count;
}
+
+
+ /**
+ * When there's more than one column it changes the configuration of the
+ * KeyValueHeap and triggers a different execution path in the RegionScanner.
+ */
+ public void testScanWithTwoColumns() throws IOException {
+ this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+ final byte[] row1 = Bytes.toBytes("row1");
+ final byte[] row2 = Bytes.toBytes("row2");
+ final byte[] qual1 = Bytes.toBytes("a");
+ final byte[] qual2 = Bytes.toBytes("b");
+ final byte[] val1 = Bytes.toBytes(1);
+ final byte[] val2 = Bytes.toBytes(-1);
+ /**
+ * prime the region.
+ */
+ Put put1 = new Put(row1);
+ put1.add(HConstants.CATALOG_FAMILY,qual1, val1);
+ put1.add(HConstants.CATALOG_FAMILY,qual2, val1);
+ r.put(put1);
+ Put put2 = new Put(row2);
+ put2.add(HConstants.CATALOG_FAMILY, qual1, val2);
+ put2.add(HConstants.CATALOG_FAMILY, qual2, val2);
+ r.put(put2);
+ /**
+ * Scan for the second row.
+ */
+ Scan scan = new Scan();
+ scan.setFilter(new SingleColumnValueFilter(HConstants.CATALOG_FAMILY,
+ qual2, CompareFilter.CompareOp.EQUAL, val2));
+
+ InternalScanner scanner1 = r.getScanner(scan);
+ List<KeyValue> res = new ArrayList<KeyValue>();
+ assertFalse(scanner1.next(res));
+ assertEquals(2, res.size());
+ scanner1.close();
+ }
+
}
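
The new test drives the two-column KeyValueHeap path through a SingleColumnValueFilter, which keeps or drops whole rows based on a single column's value. For reference, a hedged client-side sketch of the same filter against a running cluster, using the 0.20 client API; the table and column names are made up:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.filter.CompareFilter;
    import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TwoColumnScanExample {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(new HBaseConfiguration(), "mytable");
        Scan scan = new Scan();
        // Keep only rows whose "info:b" column equals -1, as in the test.
        scan.setFilter(new SingleColumnValueFilter(Bytes.toBytes("info"),
            Bytes.toBytes("b"), CompareFilter.CompareOp.EQUAL,
            Bytes.toBytes(-1)));
        ResultScanner scanner = table.getScanner(scan);
        try {
          for (Result r : scanner) {
            System.out.println(Bytes.toString(r.getRow()));
          }
        } finally {
          scanner.close();
        }
      }
    }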