You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2012/08/17 18:46:09 UTC
svn commit: r1374354 [1/5] - in /hbase/trunk/hbase-server/src:
main/java/org/apache/hadoop/hbase/coprocessor/
main/java/org/apache/hadoop/hbase/mapreduce/
main/java/org/apache/hadoop/hbase/master/
main/java/org/apache/hadoop/hbase/regionserver/ main/ja...
Author: stack
Date: Fri Aug 17 16:46:07 2012
New Revision: 1374354
URL: http://svn.apache.org/viewvc?rev=1374354&view=rev
Log:
HBASE-6599 Rename Store (the implementations) as HStore, and HStore (the Interface) as Store
Modified:
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java Fri Aug 17 16:46:07 2012
@@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.regionser
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -78,7 +78,7 @@ public abstract class BaseRegionObserver
@Override
public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final KeyValueScanner memstoreScanner, final InternalScanner s)
+ final HStore store, final KeyValueScanner memstoreScanner, final InternalScanner s)
throws IOException {
return null;
}
@@ -92,13 +92,13 @@ public abstract class BaseRegionObserver
}
@Override
- public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+ public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> e, HStore store,
InternalScanner scanner) throws IOException {
return scanner;
}
@Override
- public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e, Store store,
+ public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e, HStore store,
StoreFile resultFile) throws IOException {
}
@@ -113,27 +113,27 @@ public abstract class BaseRegionObserver
@Override
public void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final List<StoreFile> candidates) throws IOException { }
+ final HStore store, final List<StoreFile> candidates) throws IOException { }
@Override
public void postCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final ImmutableList<StoreFile> selected) { }
+ final HStore store, final ImmutableList<StoreFile> selected) { }
@Override
public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
- final Store store, final InternalScanner scanner) throws IOException {
+ final HStore store, final InternalScanner scanner) throws IOException {
return scanner;
}
@Override
public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
+ final HStore store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
final long earliestPutTs, final InternalScanner s) throws IOException {
return null;
}
@Override
- public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, final Store store,
+ public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e, final HStore store,
final StoreFile resultFile) throws IOException {
}
@@ -270,7 +270,7 @@ public abstract class BaseRegionObserver
@Override
public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
+ final HStore store, final Scan scan, final NavigableSet<byte[]> targetCols,
final KeyValueScanner s) throws IOException {
return null;
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java Fri Aug 17 16:46:07 2012
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.regionser
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.RegionScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
@@ -84,14 +84,14 @@ public interface RegionObserver extends
* @throws IOException if an error occurred on the coprocessor
*/
InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final KeyValueScanner memstoreScanner, final InternalScanner s)
+ final HStore store, final KeyValueScanner memstoreScanner, final InternalScanner s)
throws IOException;
/**
* Called before the memstore is flushed to disk.
* @param c the environment provided by the region server
* @throws IOException if an error occurred on the coprocessor
- * @deprecated use {@link #preFlush(ObserverContext, Store, InternalScanner)} instead
+ * @deprecated use {@link #preFlush(ObserverContext, HStore, InternalScanner)} instead
*/
void preFlush(final ObserverContext<RegionCoprocessorEnvironment> c) throws IOException;
@@ -104,14 +104,14 @@ public interface RegionObserver extends
* unless the implementation is writing new store files on its own.
* @throws IOException if an error occurred on the coprocessor
*/
- InternalScanner preFlush(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
+ InternalScanner preFlush(final ObserverContext<RegionCoprocessorEnvironment> c, final HStore store,
final InternalScanner scanner) throws IOException;
/**
* Called after the memstore is flushed to disk.
* @param c the environment provided by the region server
* @throws IOException if an error occurred on the coprocessor
- * @deprecated use {@link #preFlush(ObserverContext, Store, InternalScanner)} instead.
+ * @deprecated use {@link #postFlush(ObserverContext, HStore, StoreFile)} instead.
*/
void postFlush(final ObserverContext<RegionCoprocessorEnvironment> c) throws IOException;
@@ -122,7 +122,7 @@ public interface RegionObserver extends
* @param resultFile the new store file written out during the flush
* @throws IOException if an error occurred on the coprocessor
*/
- void postFlush(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
+ void postFlush(final ObserverContext<RegionCoprocessorEnvironment> c, final HStore store,
final StoreFile resultFile) throws IOException;
/**
@@ -135,7 +135,7 @@ public interface RegionObserver extends
* @throws IOException if an error occurred on the coprocessor
*/
void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final List<StoreFile> candidates) throws IOException;
+ final HStore store, final List<StoreFile> candidates) throws IOException;
/**
* Called after the {@link StoreFile}s to compact have been selected from the
@@ -145,7 +145,7 @@ public interface RegionObserver extends
* @param selected the store files selected to compact
*/
void postCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final ImmutableList<StoreFile> selected);
+ final HStore store, final ImmutableList<StoreFile> selected);
/**
* Called prior to writing the {@link StoreFile}s selected for compaction into
@@ -172,7 +172,7 @@ public interface RegionObserver extends
* @throws IOException if an error occurred on the coprocessor
*/
InternalScanner preCompact(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final InternalScanner scanner) throws IOException;
+ final HStore store, final InternalScanner scanner) throws IOException;
/**
* Called prior to writing the {@link StoreFile}s selected for compaction into
@@ -194,7 +194,7 @@ public interface RegionObserver extends
* @throws IOException if an error occurred on the coprocessor
*/
InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
+ final HStore store, List<? extends KeyValueScanner> scanners, final ScanType scanType,
final long earliestPutTs, final InternalScanner s) throws IOException;
/**
@@ -205,7 +205,7 @@ public interface RegionObserver extends
* @param resultFile the new store file written out during compaction
* @throws IOException if an error occurred on the coprocessor
*/
- void postCompact(final ObserverContext<RegionCoprocessorEnvironment> c, final Store store,
+ void postCompact(final ObserverContext<RegionCoprocessorEnvironment> c, final HStore store,
StoreFile resultFile) throws IOException;
/**
@@ -623,8 +623,8 @@ public interface RegionObserver extends
* Called before a store opens a new scanner.
* This hook is called when a "user" scanner is opened.
* <p>
- * See {@link #preFlushScannerOpen(ObserverContext, Store, KeyValueScanner, InternalScanner)}
- * and {@link #preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner)}
+ * See {@link #preFlushScannerOpen(ObserverContext, HStore, KeyValueScanner, InternalScanner)}
+ * and {@link #preCompactScannerOpen(ObserverContext, HStore, List, ScanType, long, InternalScanner)}
* to override scanners created for flushes or compactions, resp.
* <p>
* Call CoprocessorEnvironment#complete to skip any subsequent chained
@@ -640,7 +640,7 @@ public interface RegionObserver extends
* @throws IOException if an error occurred on the coprocessor
*/
KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
- final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
+ final HStore store, final Scan scan, final NavigableSet<byte[]> targetCols,
final KeyValueScanner s) throws IOException;
/**
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java Fri Aug 17 16:46:07 2012
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.io.hfile.
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.util.Bytes;
@@ -205,8 +205,8 @@ public class HFileOutputFormat extends F
.withCompression(compression)
.withComparator(KeyValue.KEY_COMPARATOR)
.withDataBlockEncoder(encoder)
- .withChecksumType(Store.getChecksumType(conf))
- .withBytesPerChecksum(Store.getBytesPerChecksum(conf))
+ .withChecksumType(HStore.getChecksumType(conf))
+ .withBytesPerChecksum(HStore.getBytesPerChecksum(conf))
.create();
this.writers.put(family, wl);
return wl;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Fri Aug 17 16:46:07 2012
@@ -72,7 +72,7 @@ import org.apache.hadoop.hbase.io.hfile.
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
@@ -551,8 +551,8 @@ public class LoadIncrementalHFiles exten
.withCompression(compression)
.withDataBlockEncoder(dataBlockEncoder)
.withBloomType(bloomFilterType)
- .withChecksumType(Store.getChecksumType(conf))
- .withBytesPerChecksum(Store.getBytesPerChecksum(conf))
+ .withChecksumType(HStore.getChecksumType(conf))
+ .withBytesPerChecksum(HStore.getBytesPerChecksum(conf))
.build();
HFileScanner scanner = halfReader.getScanner(false, false, false);
scanner.seekTo();
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java Fri Aug 17 16:46:07 2012
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.backup.HF
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -336,7 +336,7 @@ class CatalogJanitor extends Chore {
HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
- Path p = Store.getStoreHomedir(tabledir, split.getEncodedName(),
+ Path p = HStore.getStoreHomedir(tabledir, split.getEncodedName(),
family.getName());
if (!fs.exists(p)) continue;
// Look for reference files. Call listStatus with anonymous instance of PathFilter.
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Fri Aug 17 16:46:07 2012
@@ -158,7 +158,7 @@ public class CompactSplitThread implemen
public synchronized boolean requestSplit(final HRegion r) {
// don't split regions that are blocking
- if (shouldSplitRegion() && r.getCompactPriority() >= HStore.PRIORITY_USER) {
+ if (shouldSplitRegion() && r.getCompactPriority() >= Store.PRIORITY_USER) {
byte[] midKey = r.checkSplit();
if (midKey != null) {
requestSplit(r, midKey);
@@ -186,19 +186,19 @@ public class CompactSplitThread implemen
public synchronized void requestCompaction(final HRegion r,
final String why) throws IOException {
- for (HStore s : r.getStores().values()) {
- requestCompaction(r, s, why, HStore.NO_PRIORITY);
+ for (Store s : r.getStores().values()) {
+ requestCompaction(r, s, why, Store.NO_PRIORITY);
}
}
- public synchronized void requestCompaction(final HRegion r, final HStore s,
+ public synchronized void requestCompaction(final HRegion r, final Store s,
final String why) throws IOException {
- requestCompaction(r, s, why, HStore.NO_PRIORITY);
+ requestCompaction(r, s, why, Store.NO_PRIORITY);
}
public synchronized void requestCompaction(final HRegion r, final String why,
int p) throws IOException {
- for (HStore s : r.getStores().values()) {
+ for (Store s : r.getStores().values()) {
requestCompaction(r, s, why, p);
}
}
@@ -209,7 +209,7 @@ public class CompactSplitThread implemen
* @param why Why compaction requested -- used in debug messages
* @param priority override the default priority (NO_PRIORITY == decide)
*/
- public synchronized void requestCompaction(final HRegion r, final HStore s,
+ public synchronized void requestCompaction(final HRegion r, final Store s,
final String why, int priority) throws IOException {
if (this.server.isStopped()) {
return;
@@ -217,7 +217,7 @@ public class CompactSplitThread implemen
CompactionRequest cr = s.requestCompaction(priority);
if (cr != null) {
cr.setServer(server);
- if (priority != HStore.NO_PRIORITY) {
+ if (priority != Store.NO_PRIORITY) {
cr.setPriority(priority);
}
ThreadPoolExecutor pool = s.throttleCompaction(cr.getSize())
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java Fri Aug 17 16:46:07 2012
@@ -37,7 +37,7 @@ public interface CompactionRequestor {
* @param why Why compaction was requested -- used in debug messages
* @throws IOException
*/
- public void requestCompaction(final HRegion r, final HStore s, final String why)
+ public void requestCompaction(final HRegion r, final Store s, final String why)
throws IOException;
/**
@@ -55,7 +55,7 @@ public interface CompactionRequestor {
* @param pri Priority of this compaction. minHeap. <=0 is critical
* @throws IOException
*/
- public void requestCompaction(final HRegion r, final HStore s,
+ public void requestCompaction(final HRegion r, final Store s,
final String why, int pri) throws IOException;
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Compactor.java Fri Aug 17 16:46:07 2012
@@ -60,7 +60,7 @@ class Compactor extends Configured {
* nothing made it through the compaction.
* @throws IOException
*/
- StoreFile.Writer compact(final Store store,
+ StoreFile.Writer compact(final HStore store,
final Collection<StoreFile> filesToCompact,
final boolean majorCompaction, final long maxId)
throws IOException {
@@ -176,9 +176,9 @@ class Compactor extends Configured {
++progress.currentCompactedKVs;
// check periodically to see if a system stop is requested
- if (Store.closeCheckInterval > 0) {
+ if (HStore.closeCheckInterval > 0) {
bytesWritten += kv.getLength();
- if (bytesWritten > Store.closeCheckInterval) {
+ if (bytesWritten > HStore.closeCheckInterval) {
bytesWritten = 0;
isInterrupted(store, writer);
}
@@ -201,7 +201,7 @@ class Compactor extends Configured {
return writer;
}
- void isInterrupted(final Store store, final StoreFile.Writer writer)
+ void isInterrupted(final HStore store, final StoreFile.Writer writer)
throws IOException {
if (store.getHRegion().areWritesEnabled()) return;
// Else cleanup.
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java Fri Aug 17 16:46:07 2012
@@ -51,7 +51,7 @@ public class ConstantSizeRegionSplitPoli
boolean force = region.shouldForceSplit();
boolean foundABigStore = false;
- for (HStore store : region.getStores().values()) {
+ for (Store store : region.getStores().values()) {
// If any of the stores are unable to split (eg they contain reference files)
// then don't split
if ((!store.canSplit())) {
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/GetClosestRowBeforeTracker.java Fri Aug 17 16:46:07 2012
@@ -84,7 +84,7 @@ class GetClosestRowBeforeTracker {
* @return True if this <code>kv</code> is expired.
*/
boolean isExpired(final KeyValue kv) {
- return Store.isExpired(kv, this.oldestts);
+ return HStore.isExpired(kv, this.oldestts);
}
/*
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri Aug 17 16:46:07 2012
@@ -203,7 +203,7 @@ public class HRegion implements HeapSize
private final AtomicInteger lockIdGenerator = new AtomicInteger(1);
static private Random rand = new Random();
- protected final Map<byte[], HStore> stores = new ConcurrentSkipListMap<byte[], HStore>(
+ protected final Map<byte[], Store> stores = new ConcurrentSkipListMap<byte[], Store>(
Bytes.BYTES_RAWCOMPARATOR);
// Registered region protocol handlers
@@ -545,22 +545,22 @@ public class HRegion implements HeapSize
ThreadPoolExecutor storeOpenerThreadPool =
getStoreOpenAndCloseThreadPool(
"StoreOpenerThread-" + this.regionInfo.getRegionNameAsString());
- CompletionService<Store> completionService =
- new ExecutorCompletionService<Store>(storeOpenerThreadPool);
+ CompletionService<HStore> completionService =
+ new ExecutorCompletionService<HStore>(storeOpenerThreadPool);
// initialize each store in parallel
for (final HColumnDescriptor family : htableDescriptor.getFamilies()) {
status.setStatus("Instantiating store for column family " + family);
- completionService.submit(new Callable<Store>() {
- public Store call() throws IOException {
+ completionService.submit(new Callable<HStore>() {
+ public HStore call() throws IOException {
return instantiateHStore(tableDir, family);
}
});
}
try {
for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) {
- Future<Store> future = completionService.take();
- Store store = future.get();
+ Future<HStore> future = completionService.take();
+ HStore store = future.get();
this.stores.put(store.getColumnFamilyName().getBytes(), store);
long storeSeqId = store.getMaxSequenceId();
@@ -642,7 +642,7 @@ public class HRegion implements HeapSize
* @return True if this region has references.
*/
public boolean hasReferences() {
- for (HStore store : this.stores.values()) {
+ for (Store store : this.stores.values()) {
for (StoreFile sf : store.getStorefiles()) {
// Found a reference, return.
if (sf.isReference()) return true;
@@ -660,7 +660,7 @@ public class HRegion implements HeapSize
HDFSBlocksDistribution hdfsBlocksDistribution =
new HDFSBlocksDistribution();
synchronized (this.stores) {
- for (HStore store : this.stores.values()) {
+ for (Store store : this.stores.values()) {
for (StoreFile sf : store.getStorefiles()) {
HDFSBlocksDistribution storeFileBlocksDistribution =
sf.getHDFSBlockDistribution();
@@ -689,7 +689,7 @@ public class HRegion implements HeapSize
FileSystem fs = tablePath.getFileSystem(conf);
for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
- Path storeHomeDir = Store.getStoreHomedir(tablePath, regionEncodedName,
+ Path storeHomeDir = HStore.getStoreHomedir(tablePath, regionEncodedName,
family.getName());
if (!fs.exists(storeHomeDir))continue;
@@ -977,7 +977,7 @@ public class HRegion implements HeapSize
storeCloserThreadPool);
// close each store in parallel
- for (final HStore store : stores.values()) {
+ for (final Store store : stores.values()) {
completionService
.submit(new Callable<ImmutableList<StoreFile>>() {
public ImmutableList<StoreFile> call() throws IOException {
@@ -1173,7 +1173,7 @@ public class HRegion implements HeapSize
/** @return returns size of largest HStore. */
public long getLargestHStoreSize() {
long size = 0;
- for (HStore h : stores.values()) {
+ for (Store h : stores.values()) {
long storeSize = h.getSize();
if (storeSize > size) {
size = storeSize;
@@ -1205,7 +1205,7 @@ public class HRegion implements HeapSize
}
void triggerMajorCompaction() {
- for (HStore h : stores.values()) {
+ for (Store h : stores.values()) {
h.triggerMajorCompaction();
}
}
@@ -1232,7 +1232,7 @@ public class HRegion implements HeapSize
* @throws IOException e
*/
public void compactStores() throws IOException {
- for (HStore s : getStores().values()) {
+ for (Store s : getStores().values()) {
CompactionRequest cr = s.requestCompaction();
if(cr != null) {
try {
@@ -1500,7 +1500,7 @@ public class HRegion implements HeapSize
wal.startCacheFlush(this.regionInfo.getEncodedNameAsBytes());
completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId);
- for (HStore s : stores.values()) {
+ for (Store s : stores.values()) {
storeFlushers.add(s.getStoreFlusher(completeSequenceId));
}
@@ -1658,7 +1658,7 @@ public class HRegion implements HeapSize
startRegionOperation();
this.readRequestsCount.increment();
try {
- HStore store = getStore(family);
+ Store store = getStore(family);
// get the closest key. (HStore.getRowKeyAtOrBefore can return null)
KeyValue key = store.getRowKeyAtOrBefore(row);
Result result = null;
@@ -2662,7 +2662,7 @@ public class HRegion implements HeapSize
byte[] family = e.getKey();
List<KeyValue> edits = e.getValue();
- HStore store = getStore(family);
+ Store store = getStore(family);
for (KeyValue kv: edits) {
kv.setMemstoreTS(localizedWriteEntry.getWriteNumber());
size += store.add(kv);
@@ -2702,7 +2702,7 @@ public class HRegion implements HeapSize
// Remove those keys from the memstore that matches our
// key's (row, cf, cq, timestamp, memstoreTS). The interesting part is
// that even the memstoreTS has to match for keys that will be rolleded-back.
- HStore store = getStore(family);
+ Store store = getStore(family);
for (KeyValue kv: edits) {
store.rollback(kv);
kvsRolledback++;
@@ -2918,7 +2918,7 @@ public class HRegion implements HeapSize
long editsCount = 0;
long intervalEdits = 0;
HLog.Entry entry;
- HStore store = null;
+ Store store = null;
boolean reported_once = false;
try {
@@ -3056,7 +3056,7 @@ public class HRegion implements HeapSize
* @param kv KeyValue to add.
* @return True if we should flush.
*/
- protected boolean restoreEdit(final HStore s, final KeyValue kv) {
+ protected boolean restoreEdit(final Store s, final KeyValue kv) {
long kvSize = s.add(kv);
if (this.rsAccounting != null) {
rsAccounting.addAndGetRegionReplayEditsSize(this.regionInfo.getRegionName(), kvSize);
@@ -3079,9 +3079,9 @@ public class HRegion implements HeapSize
return true;
}
- protected Store instantiateHStore(Path tableDir, HColumnDescriptor c)
+ protected HStore instantiateHStore(Path tableDir, HColumnDescriptor c)
throws IOException {
- return new Store(tableDir, this, c, this.fs, this.conf);
+ return new HStore(tableDir, this, c, this.fs, this.conf);
}
/**
@@ -3091,11 +3091,11 @@ public class HRegion implements HeapSize
* @return Store that goes with the family on passed <code>column</code>.
* TODO: Make this lookup faster.
*/
- public HStore getStore(final byte[] column) {
+ public Store getStore(final byte[] column) {
return this.stores.get(column);
}
- public Map<byte[], HStore> getStores() {
+ public Map<byte[], Store> getStores() {
return this.stores;
}
@@ -3111,7 +3111,7 @@ public class HRegion implements HeapSize
List<String> storeFileNames = new ArrayList<String>();
synchronized(closeLock) {
for(byte[] column : columns) {
- HStore store = this.stores.get(column);
+ Store store = this.stores.get(column);
if (store == null) {
throw new IllegalArgumentException("No column family : " +
new String(column) + " available");
@@ -3331,7 +3331,7 @@ public class HRegion implements HeapSize
byte[] familyName = p.getFirst();
String path = p.getSecond();
- HStore store = getStore(familyName);
+ Store store = getStore(familyName);
if (store == null) {
IOException ioe = new DoNotRetryIOException(
"No such column family " + Bytes.toStringBinary(familyName));
@@ -3373,7 +3373,7 @@ public class HRegion implements HeapSize
for (Pair<byte[], String> p : familyPaths) {
byte[] familyName = p.getFirst();
String path = p.getSecond();
- HStore store = getStore(familyName);
+ Store store = getStore(familyName);
try {
store.bulkLoadHFile(path);
} catch (IOException ioe) {
@@ -3474,7 +3474,7 @@ public class HRegion implements HeapSize
for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
scan.getFamilyMap().entrySet()) {
- HStore store = stores.get(entry.getKey());
+ Store store = stores.get(entry.getKey());
KeyValueScanner scanner = store.getScanner(scan, entry.getValue());
scanners.add(scanner);
}
@@ -4054,7 +4054,7 @@ public class HRegion implements HeapSize
public static void makeColumnFamilyDirs(FileSystem fs, Path tabledir,
final HRegionInfo hri, byte [] colFamily)
throws IOException {
- Path dir = Store.getStoreHomedir(tabledir, hri.getEncodedName(), colFamily);
+ Path dir = HStore.getStoreHomedir(tabledir, hri.getEncodedName(), colFamily);
if (!fs.mkdirs(dir)) {
LOG.warn("Failed to create " + dir);
}
@@ -4195,7 +4195,7 @@ public class HRegion implements HeapSize
}
for (StoreFile hsf: srcFiles) {
StoreFile.rename(fs, hsf.getPath(),
- StoreFile.getUniqueFile(fs, Store.getStoreHomedir(tableDir,
+ StoreFile.getUniqueFile(fs, HStore.getStoreHomedir(tableDir,
newRegionInfo.getEncodedName(), colFamily)));
}
}
@@ -4252,7 +4252,7 @@ public class HRegion implements HeapSize
* @throws IOException
*/
boolean isMajorCompaction() throws IOException {
- for (HStore store : this.stores.values()) {
+ for (Store store : this.stores.values()) {
if (store.isMajorCompaction()) {
return true;
}
@@ -4638,7 +4638,7 @@ public class HRegion implements HeapSize
boolean flush = false;
WALEdit walEdits = null;
List<KeyValue> allKVs = new ArrayList<KeyValue>(append.size());
- Map<HStore, List<KeyValue>> tempMemstore = new HashMap<HStore, List<KeyValue>>();
+ Map<Store, List<KeyValue>> tempMemstore = new HashMap<Store, List<KeyValue>>();
long before = EnvironmentEdgeManager.currentTimeMillis();
long size = 0;
long txid = 0;
@@ -4655,7 +4655,7 @@ public class HRegion implements HeapSize
for (Map.Entry<byte[], List<KeyValue>> family : append.getFamilyMap()
.entrySet()) {
- HStore store = stores.get(family.getKey());
+ Store store = stores.get(family.getKey());
List<KeyValue> kvs = new ArrayList<KeyValue>(family.getValue().size());
// Get previous values for all columns in this family
@@ -4738,8 +4738,8 @@ public class HRegion implements HeapSize
}
//Actually write to Memstore now
- for (Map.Entry<HStore, List<KeyValue>> entry : tempMemstore.entrySet()) {
- HStore store = entry.getKey();
+ for (Map.Entry<Store, List<KeyValue>> entry : tempMemstore.entrySet()) {
+ Store store = entry.getKey();
size += store.upsert(entry.getValue());
allKVs.addAll(entry.getValue());
}
@@ -4791,7 +4791,7 @@ public class HRegion implements HeapSize
boolean flush = false;
WALEdit walEdits = null;
List<KeyValue> allKVs = new ArrayList<KeyValue>(increment.numColumns());
- Map<HStore, List<KeyValue>> tempMemstore = new HashMap<HStore, List<KeyValue>>();
+ Map<Store, List<KeyValue>> tempMemstore = new HashMap<Store, List<KeyValue>>();
long before = EnvironmentEdgeManager.currentTimeMillis();
long size = 0;
long txid = 0;
@@ -4808,7 +4808,7 @@ public class HRegion implements HeapSize
for (Map.Entry<byte [], NavigableMap<byte [], Long>> family :
increment.getFamilyMap().entrySet()) {
- HStore store = stores.get(family.getKey());
+ Store store = stores.get(family.getKey());
List<KeyValue> kvs = new ArrayList<KeyValue>(family.getValue().size());
// Get previous values for all columns in this family
@@ -4860,8 +4860,8 @@ public class HRegion implements HeapSize
}
//Actually write to Memstore now
- for (Map.Entry<HStore, List<KeyValue>> entry : tempMemstore.entrySet()) {
- HStore store = entry.getKey();
+ for (Map.Entry<Store, List<KeyValue>> entry : tempMemstore.entrySet()) {
+ Store store = entry.getKey();
size += store.upsert(entry.getValue());
allKVs.addAll(entry.getValue());
}
@@ -4918,7 +4918,7 @@ public class HRegion implements HeapSize
Integer lid = obtainRowLock(row);
this.updatesLock.readLock().lock();
try {
- HStore store = stores.get(family);
+ Store store = stores.get(family);
// Get the old value:
Get get = new Get(row);
@@ -5029,7 +5029,7 @@ public class HRegion implements HeapSize
@Override
public long heapSize() {
long heapSize = DEEP_OVERHEAD;
- for (HStore store : this.stores.values()) {
+ for (Store store : this.stores.values()) {
heapSize += store.heapSize();
}
// this does not take into account row locks, recent flushes, mvcc entries
@@ -5274,7 +5274,7 @@ public class HRegion implements HeapSize
*/
public int getCompactPriority() {
int count = Integer.MAX_VALUE;
- for (HStore store : stores.values()) {
+ for (Store store : stores.values()) {
count = Math.min(count, store.getCompactPriority());
}
return count;
@@ -5286,7 +5286,7 @@ public class HRegion implements HeapSize
* @return true if any store has too many store files
*/
public boolean needsCompaction() {
- for (HStore store : stores.values()) {
+ for (Store store : stores.values()) {
if(store.needsCompaction()) {
return true;
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Fri Aug 17 16:46:07 2012
@@ -1142,7 +1142,7 @@ public class HRegionServer implements C
long currentCompactedKVs = 0;
synchronized (r.stores) {
stores += r.stores.size();
- for (HStore store : r.stores.values()) {
+ for (Store store : r.stores.values()) {
storefiles += store.getStorefilesCount();
storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed()
/ 1024 / 1024);
@@ -1228,7 +1228,7 @@ public class HRegionServer implements C
for (HRegion r : this.instance.onlineRegions.values()) {
if (r == null)
continue;
- for (HStore s : r.getStores().values()) {
+ for (Store s : r.getStores().values()) {
try {
if (s.needsCompaction()) {
// Queue a compaction. Will recognize if major is needed.
@@ -1369,8 +1369,8 @@ public class HRegionServer implements C
writeRequestsCount += r.writeRequestsCount.get();
synchronized (r.stores) {
stores += r.stores.size();
- for (Map.Entry<byte[], HStore> ee : r.stores.entrySet()) {
- final HStore store = ee.getValue();
+ for (Map.Entry<byte[], Store> ee : r.stores.entrySet()) {
+ final Store store = ee.getValue();
final SchemaMetrics schemaMetrics = store.getSchemaMetrics();
{
@@ -1644,7 +1644,7 @@ public class HRegionServer implements C
LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() +
", daughter=" + daughter);
// Do checks to see if we need to compact (references or too many files)
- for (HStore s : r.getStores().values()) {
+ for (Store s : r.getStores().values()) {
if (s.hasReferences() || s.needsCompaction()) {
getCompactionRequester().requestCompaction(r, s, "Opening Region");
}
@@ -2009,7 +2009,7 @@ public class HRegionServer implements C
int storefileIndexSizeMB = 0;
synchronized (r.stores) {
stores += r.stores.size();
- for (HStore store : r.stores.values()) {
+ for (Store store : r.stores.values()) {
storefiles += store.getStorefilesCount();
storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
@@ -3590,7 +3590,7 @@ public class HRegionServer implements C
region.getRegionNameAsString());
compactSplitThread.requestCompaction(region,
"User-triggered " + (major ? "major " : "") + "compaction",
- HStore.PRIORITY_USER);
+ Store.PRIORITY_USER);
return CompactRegionResponse.newBuilder().build();
} catch (IOException ie) {
throw new ServiceException(ie);