You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by jd...@apache.org on 2010/05/07 22:53:58 UTC
svn commit: r942215 - in /hadoop/hbase/trunk: CHANGES.txt
core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
Author: jdcryans
Date: Fri May 7 20:53:58 2010
New Revision: 942215
URL: http://svn.apache.org/viewvc?rev=942215&view=rev
Log:
HBASE-2503 PriorityQueue isn't thread safe, KeyValueHeap uses it that way
Modified:
hadoop/hbase/trunk/CHANGES.txt
hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=942215&r1=942214&r2=942215&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Fri May 7 20:53:58 2010
@@ -303,6 +303,7 @@ Release 0.21.0 - Unreleased
HBASE-2482 regions in transition do not get reassigned by master when RS
crashes (Todd Lipcon via Stack)
HBASE-2513 hbase-2414 added bug where we'd tight-loop if no root available
+ HBASE-2503 PriorityQueue isn't thread safe, KeyValueHeap uses it that way
IMPROVEMENTS
HBASE-1760 Cleanup TODOs in HTable
Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=942215&r1=942214&r2=942215&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri May 7 20:53:58 2010
@@ -34,6 +34,7 @@ package org.apache.hadoop.hbase.regionse
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
+ import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@@ -1824,6 +1825,8 @@ public class HRegion implements HConstan
private Filter filter;
private List<KeyValue> results = new ArrayList<KeyValue>();
private int batch;
+ // Doesn't need to be volatile, always accessed under a sync'ed method
+ private boolean filterClosed = false;
RegionScanner(Scan scan, List<KeyValueScanner> additionalScanners) {
this.filter = scan.getFilter();
@@ -1858,7 +1861,13 @@ public class HRegion implements HConstan
}
}
- public boolean next(List<KeyValue> outResults, int limit) throws IOException {
+ public synchronized boolean next(List<KeyValue> outResults, int limit)
+ throws IOException {
+ if (this.filterClosed) {
+ throw new UnknownScannerException("Scanner was closed (timed out?) " +
+ "after we renewed it. Could be caused by a very slow scanner " +
+ "or a lengthy garbage collection");
+ }
if (closing.get() || closed.get()) {
close();
throw new NotServingRegionException(regionInfo.getRegionNameAsString() +
@@ -1877,7 +1886,8 @@ public class HRegion implements HConstan
return returnResult;
}
- public boolean next(List<KeyValue> outResults) throws IOException {
+ public synchronized boolean next(List<KeyValue> outResults)
+ throws IOException {
// apply the batching limit by default
return next(outResults, batch);
}
@@ -1885,7 +1895,7 @@ public class HRegion implements HConstan
/*
* @return True if a filter rules the scanner is over, done.
*/
- boolean isFilterDone() {
+ synchronized boolean isFilterDone() {
return this.filter != null && this.filter.filterAllRemaining();
}
@@ -1955,24 +1965,15 @@ public class HRegion implements HConstan
return true;
}
- public void close() {
+ public synchronized void close() {
storeHeap.close();
- }
-
- /**
- *
- * @param scanner to be closed
- */
- public void close(KeyValueScanner scanner) {
- try {
- scanner.close();
- } catch(NullPointerException npe) {}
+ this.filterClosed = true;
}
/**
* @return the current storeHeap
*/
- public KeyValueHeap getStoreHeap() {
+ public synchronized KeyValueHeap getStoreHeap() {
return this.storeHeap;
}
}
Modified: hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java?rev=942215&r1=942214&r2=942215&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java (original)
+++ hadoop/hbase/trunk/core/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java Fri May 7 20:53:58 2010
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HRegionIn
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -195,6 +196,34 @@ public class TestScanner extends HBaseTe
}
}
+ /**
+ * Test that closing a scanner while a client is using it doesn't throw
+ * NPEs but instead an UnknownScannerException. HBASE-2503
+ * @throws Exception
+ */
+ public void testRaceBetweenClientAndTimeout() throws Exception {
+ try {
+ this.r = createNewHRegion(REGION_INFO.getTableDesc(), null, null);
+ addContent(this.r, HConstants.CATALOG_FAMILY);
+ Scan scan = new Scan();
+ InternalScanner s = r.getScanner(scan);
+ List<KeyValue> results = new ArrayList<KeyValue>();
+ try {
+ s.next(results);
+ s.close();
+ s.next(results);
+ fail("We don't want anything more, we should be failing");
+ } catch (UnknownScannerException ex) {
+ // ok!
+ return;
+ }
+ } finally {
+ this.r.close();
+ this.r.getLog().closeAndDelete();
+ shutdownDfs(this.cluster);
+ }
+ }
+
/** The test!
* @throws IOException
*/