Posted to commits@hbase.apache.org by st...@apache.org on 2012/08/17 18:46:09 UTC

svn commit: r1374354 [5/5] - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/coprocessor/ main/java/org/apache/hadoop/hbase/mapreduce/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/regionserver/ main/ja...

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java Fri Aug 17 16:46:07 2012
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.regionser
  */
 @InterfaceAudience.LimitedPrivate("Coprocessor")
 public class StoreFileScanner implements KeyValueScanner {
-  static final Log LOG = LogFactory.getLog(Store.class);
+  static final Log LOG = LogFactory.getLog(HStore.class);
 
   // the reader it comes from:
   private final StoreFile.Reader reader;

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java Fri Aug 17 16:46:07 2012
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
+import org.apache.hadoop.hbase.regionserver.HStore.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.util.Envi
 public class StoreScanner extends NonLazyKeyValueScanner
     implements KeyValueScanner, InternalScanner, ChangedReadersObserver {
   static final Log LOG = LogFactory.getLog(StoreScanner.class);
-  private Store store;
+  private HStore store;
   private ScanQueryMatcher matcher;
   private KeyValueHeap heap;
   private boolean cacheBlocks;
@@ -79,7 +79,7 @@ public class StoreScanner extends NonLaz
   private KeyValue lastTop = null;
 
   /** An internal constructor. */
-  private StoreScanner(Store store, boolean cacheBlocks, Scan scan,
+  private StoreScanner(HStore store, boolean cacheBlocks, Scan scan,
       final NavigableSet<byte[]> columns, long ttl, int minVersions) {
     this.store = store;
     this.cacheBlocks = cacheBlocks;
@@ -107,7 +107,7 @@ public class StoreScanner extends NonLaz
    * @param columns which columns we are scanning
    * @throws IOException
    */
-  public StoreScanner(Store store, ScanInfo scanInfo, Scan scan, final NavigableSet<byte[]> columns)
+  public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan, final NavigableSet<byte[]> columns)
                               throws IOException {
     this(store, scan.getCacheBlocks(), scan, columns, scanInfo.getTtl(),
         scanInfo.getMinVersions());
@@ -159,7 +159,7 @@ public class StoreScanner extends NonLaz
    * @param smallestReadPoint the readPoint that we should use for tracking
    *          versions
    */
-  public StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
+  public StoreScanner(HStore store, ScanInfo scanInfo, Scan scan,
       List<? extends KeyValueScanner> scanners, ScanType scanType,
       long smallestReadPoint, long earliestPutTs) throws IOException {
     this(store, false, scan, null, scanInfo.getTtl(),
@@ -181,7 +181,7 @@ public class StoreScanner extends NonLaz
   }
 
   /** Constructor for testing. */
-  StoreScanner(final Scan scan, Store.ScanInfo scanInfo,
+  StoreScanner(final Scan scan, HStore.ScanInfo scanInfo,
       ScanType scanType, final NavigableSet<byte[]> columns,
       final List<KeyValueScanner> scanners) throws IOException {
     this(scan, scanInfo, scanType, columns, scanners,
@@ -189,7 +189,7 @@ public class StoreScanner extends NonLaz
   }
 
   // Constructor for testing.
-  StoreScanner(final Scan scan, Store.ScanInfo scanInfo,
+  StoreScanner(final Scan scan, HStore.ScanInfo scanInfo,
       ScanType scanType, final NavigableSet<byte[]> columns,
       final List<KeyValueScanner> scanners, long earliestPutTs)
           throws IOException {
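
For readers following the rename: StoreScanner's public four-argument constructor now takes the concrete HStore together with its ScanInfo. A minimal compile-level sketch against this revision (the same call appears verbatim in NoOpScanPolicyObserver further down); the wrapper class and its package placement are illustrative assumptions, not part of the commit:

    package org.apache.hadoop.hbase.regionserver;  // assumed placement so package-level members resolve

    import java.io.IOException;
    import java.util.NavigableSet;

    import org.apache.hadoop.hbase.client.Scan;

    /** Hypothetical helper: opens a scanner over a single store post-rename. */
    public class StoreScanSketch {
      static StoreScanner openScanner(HStore store, Scan scan,
          NavigableSet<byte[]> columns) throws IOException {
        // Post-rename signature: StoreScanner(HStore, ScanInfo, Scan, NavigableSet<byte[]>)
        return new StoreScanner(store, store.getScanInfo(), scan, columns);
      }
    }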

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionRequest.java Fri Aug 17 16:46:07 2012
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.RemoteExc
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
@@ -52,7 +52,7 @@ public class CompactionRequest implement
     Runnable {
     static final Log LOG = LogFactory.getLog(CompactionRequest.class);
     private final HRegion r;
-    private final Store s;
+    private final HStore s;
     private final CompactSelection compactSelection;
     private final long totalSize;
     private final boolean isMajor;
@@ -68,7 +68,7 @@ public class CompactionRequest implement
     private static final ConcurrentHashMap<Long, AtomicInteger>
       minorCompactions = new ConcurrentHashMap<Long, AtomicInteger>();
 
-    public CompactionRequest(HRegion r, Store s,
+    public CompactionRequest(HRegion r, HStore s,
         CompactSelection files, boolean isMajor, int p) {
       Preconditions.checkNotNull(r);
       Preconditions.checkNotNull(files);
@@ -181,7 +181,7 @@ public class CompactionRequest implement
     }
 
     /** Gets the Store for the request */
-    public Store getStore() {
+    public HStore getStore() {
       return s;
     }
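
A hedged sketch of the updated request flow: CompactionRequest now carries the concrete HStore and getStore() returns that type. Rather than hand-building a CompactSelection, this asks the store for a request, as CompactionTool does later in this commit; the helper class and its package placement are assumptions:

    package org.apache.hadoop.hbase.regionserver;  // assumed placement, mirrors the in-tree callers

    import java.io.IOException;

    import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

    /** Hypothetical helper: reports on a pending compaction for one store. */
    public class CompactionRequestSketch {
      static void describe(HStore store) throws IOException {
        CompactionRequest cr = store.requestCompaction();  // may be null when there is nothing to compact
        if (cr != null) {
          HStore s = cr.getStore();  // getStore() now returns HStore
          System.out.println("Compacting store " + s.getColumnFamilyName());
        }
      }
    }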
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java Fri Aug 17 16:46:07 2012
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.ipc.Reque
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -802,14 +802,14 @@ public class AccessController extends Ba
 
   @Override
   public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-      final Store store, final InternalScanner scanner) throws IOException {
+      final HStore store, final InternalScanner scanner) throws IOException {
     requirePermission(getTableName(e.getEnvironment()), null, null, Action.ADMIN);
     return scanner;
   }
 
   @Override
   public void preCompactSelection(final ObserverContext<RegionCoprocessorEnvironment> e,
-      final Store store, final List<StoreFile> candidates) throws IOException {
+      final HStore store, final List<StoreFile> candidates) throws IOException {
     requirePermission(getTableName(e.getEnvironment()), null, null, Action.ADMIN);
   }
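
The same parameter change ripples through every RegionObserver hook. A minimal observer sketch assuming only the coprocessor signatures visible in this commit (SimpleRegionObserver below exercises the full set):

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.regionserver.HStore;
    import org.apache.hadoop.hbase.regionserver.InternalScanner;
    import org.apache.hadoop.hbase.regionserver.StoreFile;

    /** Hypothetical observer compiled against the renamed hook signatures. */
    public class RenamedHookObserver extends BaseRegionObserver {
      @Override
      public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
          HStore store, InternalScanner scanner) throws IOException {
        return scanner;  // pass-through; a real policy could wrap the scanner here
      }

      @Override
      public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> e,
          HStore store, List<StoreFile> candidates) throws IOException {
        // candidates may be pruned here before selection runs
      }
    }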
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java Fri Aug 17 16:46:07 2012
@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 
 /**
  * Helper class for all utilities related to archival/retrieval of HFiles
@@ -64,7 +64,7 @@ public class HFileArchiveUtil {
   public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir,
       byte[] family) {
     Path tableArchiveDir = getTableArchivePath(conf, tabledir);
-    return Store.getStoreHomedir(tableArchiveDir,
+    return HStore.getStoreHomedir(tableArchiveDir,
       HRegionInfo.encodeRegionName(region.getRegionName()), family);
   }
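
A small usage sketch of the relocated static helper; the table directory and encoded region name below are hypothetical placeholders:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.regionserver.HStore;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Hypothetical demo: resolve a column family directory post-rename. */
    public class StoreDirSketch {
      public static void main(String[] args) {
        Path tableDir = new Path("/hbase/mytable");    // hypothetical table dir
        String encodedRegion = "0123456789abcdef";     // hypothetical encoded region name
        Path storeDir = HStore.getStoreHomedir(tableDir, encodedRegion, Bytes.toBytes("cf"));
        System.out.println(storeDir);
      }
    }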
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java Fri Aug 17 16:46:07 2012
@@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.ServerNam
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 
 import com.google.common.base.Preconditions;
@@ -123,7 +123,7 @@ import com.google.common.collect.Sets;
  * <p>
  * The more complicated answer is that this depends upon the largest storefile
  * in your region. With a growing data size, this will get larger over time. You
- * want the largest region to be just big enough that the {@link Store} compact
+ * want the largest region to be just big enough that the {@link HStore} compact
  * selection algorithm only compacts it due to a timed major. If you don't, your
  * cluster can be prone to compaction storms as the algorithm decides to run
  * major compactions on a large series of regions all at once. Note that
@@ -671,7 +671,7 @@ public class RegionSplitter {
           HTableDescriptor htd = table.getTableDescriptor();
           // check every Column Family for that region
           for (HColumnDescriptor c : htd.getFamilies()) {
-            Path cfDir = Store.getStoreHomedir(tableDir, hri.getEncodedName(),
+            Path cfDir = HStore.getStoreHomedir(tableDir, hri.getEncodedName(),
                 c.getName());
             if (fs.exists(cfDir)) {
               for (FileStatus file : fs.listStatus(cfDir)) {
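
The javadoc in the hunk above warns about compaction storms when timed major compactions line up. A hedged configuration sketch: the period constant and the jitter key both appear verbatim in TestCompaction later in this commit, while the concrete values here are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    /** Hypothetical tuning: spread timed majors so they do not fire together. */
    public class MajorCompactionTuning {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 7 * 24 * 3600 * 1000L);  // one week
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // spread start times by +/- 50%
        return conf;
      }
    }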

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Fri Aug 17 16:46:07 2012
@@ -73,7 +73,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -1815,7 +1815,7 @@ public class HBaseTestingUtility {
    * Do a small get/scan against one store. This is required because store
    * has no actual methods of querying itself, and relies on StoreScanner.
    */
-  public static List<KeyValue> getFromStoreFile(Store store,
+  public static List<KeyValue> getFromStoreFile(HStore store,
                                                 Get get) throws IOException {
     MultiVersionConsistencyControl.resetThreadReadPoint();
     Scan scan = new Scan(get);
@@ -1839,7 +1839,7 @@ public class HBaseTestingUtility {
    * Do a small get/scan against one store. This is required because store
    * has no actual methods of querying itself, and relies on StoreScanner.
    */
-  public static List<KeyValue> getFromStoreFile(Store store,
+  public static List<KeyValue> getFromStoreFile(HStore store,
                                                 byte [] row,
                                                 NavigableSet<byte[]> columns
                                                 ) throws IOException {
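
As the javadoc says, a store has no query methods of its own, so the utility routes a Get through StoreScanner. A short usage sketch of the renamed helper; the wrapper method is hypothetical:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.regionserver.HStore;
    import org.apache.hadoop.hbase.util.Bytes;

    /** Hypothetical test helper: read one row straight from a store. */
    public class StoreGetSketch {
      static List<KeyValue> readRow(HStore store, String row) throws IOException {
        Get get = new Get(Bytes.toBytes(row));
        return HBaseTestingUtility.getFromStoreFile(store, get);
      }
    }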

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java Fri Aug 17 16:46:07 2012
@@ -40,8 +40,8 @@ import org.apache.hadoop.hbase.master.cl
 import org.apache.hadoop.hbase.regionserver.CheckedArchivingHFileCleaner;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
@@ -178,7 +178,7 @@ public class TestZooKeeperTableArchiveCl
     loadAndCompact(region);
 
     // check that we actually have some store files that were archived
-    HStore store = region.getStore(TEST_FAM);
+    Store store = region.getStore(TEST_FAM);
     Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(),
       region, store);
 
@@ -338,7 +338,7 @@ public class TestZooKeeperTableArchiveCl
     loadAndCompact(region);
 
     // check that we actually have some store files that were archived
-    HStore store = region.getStore(TEST_FAM);
+    Store store = region.getStore(TEST_FAM);
     Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(),
       region, store);
 
@@ -376,7 +376,7 @@ public class TestZooKeeperTableArchiveCl
    * Compact all the store files in a given region.
    */
   private void compactRegion(HRegion region, byte[] family) throws IOException {
-    HStore store = region.getStores().get(TEST_FAM);
+    Store store = region.getStores().get(TEST_FAM);
     store.compactRecentForTesting(store.getStorefiles().size());
   }
 }

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Fri Aug 17 16:46:07 2012
@@ -67,8 +67,8 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.io.DataInputBuffer;
@@ -4594,7 +4594,7 @@ public class TestFromClientSide {
     String regionName = table.getRegionLocations().firstKey().getEncodedName();
     HRegion region = TEST_UTIL.getRSForFirstRegionInTable(
         tableName).getFromOnlineRegions(regionName);
-    HStore store = region.getStores().values().iterator().next();
+    Store store = region.getStores().values().iterator().next();
     CacheConfig cacheConf = store.getCacheConfig();
     cacheConf.setCacheDataOnWrite(true);
     cacheConf.setEvictOnClose(true);
@@ -4669,7 +4669,7 @@ public class TestFromClientSide {
     assertEquals(++expectedBlockMiss, cache.getStats().getMissCount());
   }
 
-  private void waitForStoreFileCount(HStore store, int count, int timeout)
+  private void waitForStoreFileCount(Store store, int count, int timeout)
   throws InterruptedException {
     long start = System.currentTimeMillis();
     while (start + timeout > System.currentTimeMillis() &&

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java Fri Aug 17 16:46:07 2012
@@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.Leases;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -140,20 +140,20 @@ public class SimpleRegionObserver extend
   }
 
   @Override
-  public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c, Store store, InternalScanner scanner) {
+  public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c, HStore store, InternalScanner scanner) {
     hadPreFlush = true;
     return scanner;
   }
 
   @Override
   public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+      HStore store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
     hadPreFlushScannerOpen = true;
     return null;
   }
 
   @Override
-  public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c, Store store, StoreFile resultFile) {
+  public void postFlush(ObserverContext<RegionCoprocessorEnvironment> c, HStore store, StoreFile resultFile) {
     hadPostFlush = true;
   }
 
@@ -177,26 +177,26 @@ public class SimpleRegionObserver extend
 
   @Override
   public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<StoreFile> candidates) {
+      HStore store, List<StoreFile> candidates) {
     hadPreCompactSelect = true;
   }
 
   @Override
   public void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, ImmutableList<StoreFile> selected) {
+      HStore store, ImmutableList<StoreFile> selected) {
     hadPostCompactSelect = true;
   }
 
   @Override
   public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-      Store store, InternalScanner scanner) {
+      HStore store, InternalScanner scanner) {
     hadPreCompact = true;
     return scanner;
   }
 
   @Override
   public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
+      HStore store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
       InternalScanner s) throws IOException {
     hadPreCompactScanner = true;
     return null;
@@ -204,7 +204,7 @@ public class SimpleRegionObserver extend
 
   @Override
   public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-      Store store, StoreFile resultFile) {
+      HStore store, StoreFile resultFile) {
     hadPostCompact = true;
   }
 
@@ -222,7 +222,7 @@ public class SimpleRegionObserver extend
 
   @Override
   public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,
+      final HStore store, final Scan scan, final NavigableSet<byte[]> targetCols,
       final KeyValueScanner s) throws IOException {
     hadPreStoreScannerOpen = true;
     return null;

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java Fri Aug 17 16:46:07 2012
@@ -54,7 +54,7 @@ import org.apache.hadoop.hbase.regionser
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.SplitTransaction;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -174,13 +174,13 @@ public class TestCoprocessorInterface ex
     }
     @Override
     public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-        Store store, InternalScanner scanner) {
+        HStore store, InternalScanner scanner) {
       preCompactCalled = true;
       return scanner;
     }
     @Override
     public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-        Store store, StoreFile resultFile) {
+        HStore store, StoreFile resultFile) {
       postCompactCalled = true;
     }
     @Override

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java Fri Aug 17 16:46:07 2012
@@ -61,7 +61,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -316,7 +316,7 @@ public class TestRegionObserverInterface
 
     @Override
     public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-        Store store, final InternalScanner scanner) {
+        HStore store, final InternalScanner scanner) {
       return new InternalScanner() {
         @Override
         public boolean next(List<KeyValue> results) throws IOException {
@@ -368,7 +368,7 @@ public class TestRegionObserverInterface
 
     @Override
     public void postCompact(ObserverContext<RegionCoprocessorEnvironment> e,
-        Store store, StoreFile resultFile) {
+        HStore store, StoreFile resultFile) {
       lastCompaction = EnvironmentEdgeManager.currentTimeMillis();
     }
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java Fri Aug 17 16:46:07 2012
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.MemStore;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -303,8 +303,8 @@ public class TestHeapSize extends TestCa
         sc.heapSize());
 
     // Store Overhead
-    cl = Store.class;
-    actual = Store.FIXED_OVERHEAD;
+    cl = HStore.class;
+    actual = HStore.FIXED_OVERHEAD;
     expected = ClassSize.estimateBase(cl, false);
     if(expected != actual) {
       ClassSize.estimateBase(cl, true);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java Fri Aug 17 16:46:07 2012
@@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -706,7 +706,7 @@ public class TestHFileOutputFormat  {
       assertEquals("Should start with empty table", 0, util.countRows(table));
 
       // deep inspection: get the StoreFile dir
-      final Path storePath = Store.getStoreHomedir(
+      final Path storePath = HStore.getStoreHomedir(
           HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
           admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
           FAMILIES[0]);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Fri Aug 17 16:46:07 2012
@@ -65,7 +65,7 @@ import org.apache.hadoop.hbase.master.Ca
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -353,7 +353,7 @@ public class TestCatalogJanitor {
       Path rootdir = services.getMasterFileSystem().getRootDir();
       Path tabledir =
         HTableDescriptor.getTableDir(rootdir, htd.getName());
-      Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(),
+      Path storedir = HStore.getStoreHomedir(tabledir, splita.getEncodedName(),
           htd.getColumnFamilies()[0].getName());
       Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
       long now = System.currentTimeMillis();
@@ -599,7 +599,7 @@ public class TestCatalogJanitor {
     // the single test passes, but when the full suite is run, things get borked).
     FSUtils.setRootDir(fs.getConf(), rootdir);
     Path tabledir = HTableDescriptor.getTableDir(rootdir, htd.getName());
-    Path storedir = Store.getStoreHomedir(tabledir, parent.getEncodedName(),
+    Path storedir = HStore.getStoreHomedir(tabledir, parent.getEncodedName(),
       htd.getColumnFamilies()[0].getName());
 
     // delete the file and ensure that the files have been archived
@@ -665,7 +665,7 @@ public class TestCatalogJanitor {
     // the single test passes, but when the full suite is run, things get borked).
     FSUtils.setRootDir(fs.getConf(), rootdir);
     Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
-    Path storedir = Store.getStoreHomedir(tabledir, parent.getEncodedName(),
+    Path storedir = HStore.getStoreHomedir(tabledir, parent.getEncodedName(),
       htd.getColumnFamilies()[0].getName());
     System.out.println("Old root:" + rootdir);
     System.out.println("Old table:" + tabledir);
@@ -772,7 +772,7 @@ public class TestCatalogJanitor {
   throws IOException {
     Path rootdir = services.getMasterFileSystem().getRootDir();
     Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
-    Path storedir = Store.getStoreHomedir(tabledir, daughter.getEncodedName(),
+    Path storedir = HStore.getStoreHomedir(tabledir, daughter.getEncodedName(),
       htd.getColumnFamilies()[0].getName());
     Reference ref =
       top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java Fri Aug 17 16:46:07 2012
@@ -111,7 +111,7 @@ public class CompactionTool implements T
    * @return
    * @throws IOException
    */
-  private Store getStore(final FileSystem fs, final Path storedir, final Path tmpdir)
+  private HStore getStore(final FileSystem fs, final Path storedir, final Path tmpdir)
   throws IOException {
     // TODO: Let config on table and column family be configurable from
     // command-line setting versions, etc.  For now do defaults
@@ -121,7 +121,7 @@ public class CompactionTool implements T
     HRegion region = createHRegion(hri, tmpdir);
     // Create a Store w/ check of hbase.rootdir blanked out and return our
     // list of files instead of have Store search its home dir.
-    return new Store(tmpdir, region, hcd, fs, getConf()) {
+    return new HStore(tmpdir, region, hcd, fs, getConf()) {
       @Override
       public FileStatus[] getStoreFiles() throws IOException {
         return this.fs.listStatus(getHomedir());
@@ -145,7 +145,7 @@ public class CompactionTool implements T
     errCode = checkdir(fs, tmpdir);
     if (errCode != 0) return errCode;
     // Get a Store that wraps the inputdir of files to compact.
-    Store store = getStore(fs, inputdir, tmpdir);
+    HStore store = getStore(fs, inputdir, tmpdir);
     // Now we have a Store, run a compaction of passed files.
     try {
       CompactionRequest cr = store.requestCompaction();

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/HFileReadWriteTest.java Fri Aug 17 16:46:07 2012
@@ -346,7 +346,7 @@ public class HFileReadWriteTest {
     HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
     HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd,
         null);
-    Store store = new Store(outputDir, region, columnDescriptor, fs, conf);
+    HStore store = new HStore(outputDir, region, columnDescriptor, fs, conf);
 
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf,
         new CacheConfig(conf), fs, blockSize)
@@ -400,7 +400,7 @@ public class HFileReadWriteTest {
     return resultPath;
   }
 
-  private void performMerge(List<StoreFileScanner> scanners, Store store,
+  private void performMerge(List<StoreFileScanner> scanners, HStore store,
       StoreFile.Writer writer) throws IOException {
     InternalScanner scanner = null;
     try {

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java Fri Aug 17 16:46:07 2012
@@ -25,9 +25,9 @@ public class NoOpScanPolicyObserver exte
    */
   @Override
   public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
-    Store.ScanInfo oldSI = store.getScanInfo();
-    Store.ScanInfo scanInfo = new Store.ScanInfo(store.getFamily(), oldSI.getTtl(),
+      HStore store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+    HStore.ScanInfo oldSI = store.getScanInfo();
+    HStore.ScanInfo scanInfo = new HStore.ScanInfo(store.getFamily(), oldSI.getTtl(),
         oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
     Scan scan = new Scan();
     scan.setMaxVersions(oldSI.getMaxVersions());
@@ -41,11 +41,11 @@ public class NoOpScanPolicyObserver exte
    */
   @Override
   public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
+      HStore store, List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs,
       InternalScanner s) throws IOException {
     // this demonstrates how to override the scanners default behavior
-    Store.ScanInfo oldSI = store.getScanInfo();
-    Store.ScanInfo scanInfo = new Store.ScanInfo(store.getFamily(), oldSI.getTtl(),
+    HStore.ScanInfo oldSI = store.getScanInfo();
+    HStore.ScanInfo scanInfo = new HStore.ScanInfo(store.getFamily(), oldSI.getTtl(),
         oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
     Scan scan = new Scan();
     scan.setMaxVersions(oldSI.getMaxVersions());
@@ -55,7 +55,7 @@ public class NoOpScanPolicyObserver exte
 
   @Override
   public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-      Store store, final Scan scan, final NavigableSet<byte[]> targetCols, KeyValueScanner s)
+      HStore store, final Scan scan, final NavigableSet<byte[]> targetCols, KeyValueScanner s)
       throws IOException {
     return new StoreScanner(store, store.getScanInfo(), scan, targetCols);
   }
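
The pattern above generalizes: copy the store's ScanInfo, substitute the field to override, then hand the copy to a fresh StoreScanner. A hedged sketch using the seven-argument ScanInfo constructor that TestCompaction below also uses; the helper class and its package placement are assumptions:

    package org.apache.hadoop.hbase.regionserver;  // assumed placement, mirrors the in-tree observers

    /** Hypothetical helper: rebuild a store's ScanInfo with a custom TTL. */
    public class ScanInfoRebuild {
      static HStore.ScanInfo withTtl(HStore store, long ttlMs) {
        HStore.ScanInfo old = store.getScanInfo();
        // Same field order as the in-tree callers; only the TTL changes.
        return new HStore.ScanInfo(old.getFamily(), old.getMinVersions(),
            old.getMaxVersions(), ttlMs, old.getKeepDeletedCells(),
            old.getTimeToPurgeDeletes(), old.getComparator());
      }
    }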

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java Fri Aug 17 16:46:07 2012
@@ -127,7 +127,7 @@ public class TestAtomicOperation extends
 
     assertEquals(value+amount, result);
 
-    Store store = (Store) region.getStore(fam1);
+    HStore store = (HStore) region.getStore(fam1);
     // ICV removes any extra values floating around in there.
     assertEquals(1, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java Fri Aug 17 16:46:07 2012
@@ -121,7 +121,7 @@ public class TestCacheOnWriteInSchema {
   private final CacheOnWriteType cowType;
   private Configuration conf;
   private final String testDescription;
-  private Store store;
+  private HStore store;
   private FileSystem fs;
 
   public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
@@ -164,7 +164,7 @@ public class TestCacheOnWriteInSchema {
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
     HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
-    store = new Store(basedir, region, hcd, fs, conf);
+    store = new HStore(basedir, region, hcd, fs, conf);
   }
 
   @After

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java Fri Aug 17 16:46:07 2012
@@ -49,7 +49,7 @@ public class TestCompactSelection extend
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
   private Configuration conf;
-  private Store store;
+  private HStore store;
   private static final String DIR=
     TEST_UTIL.getDataTestDir("TestCompactSelection").toString();
   private static Path TEST_FILE;
@@ -91,7 +91,7 @@ public class TestCompactSelection extend
     Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
     region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
 
-    store = new Store(basedir, region, hcd, fs, conf);
+    store = new HStore(basedir, region, hcd, fs, conf);
     TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
     fs.create(TEST_FILE);
   }

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java Fri Aug 17 16:46:07 2012
@@ -168,10 +168,10 @@ public class TestCompaction extends HBas
 
   public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly)
       throws Exception {
-    Map<Store, HFileDataBlockEncoder> replaceBlockCache =
-        new HashMap<Store, HFileDataBlockEncoder>();
-    for (Entry<byte[], HStore> pair : r.getStores().entrySet()) {
-      Store store = (Store) pair.getValue();
+    Map<HStore, HFileDataBlockEncoder> replaceBlockCache =
+        new HashMap<HStore, HFileDataBlockEncoder>();
+    for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
+      HStore store = (HStore) pair.getValue();
       HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
       replaceBlockCache.put(store, blockEncoder);
       final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
@@ -184,7 +184,7 @@ public class TestCompaction extends HBas
     majorCompaction();
 
     // restore settings
-    for (Entry<Store, HFileDataBlockEncoder> entry :
+    for (Entry<HStore, HFileDataBlockEncoder> entry :
         replaceBlockCache.entrySet()) {
       entry.getKey().setDataBlockEncoderInTest(entry.getValue());
     }
@@ -206,7 +206,7 @@ public class TestCompaction extends HBas
     assertEquals(compactionThreshold, result.size());
 
     // see if CompactionProgress is in place but null
-    for (HStore store : this.r.stores.values()) {
+    for (Store store : this.r.stores.values()) {
       assertNull(store.getCompactionProgress());
     }
 
@@ -215,7 +215,7 @@ public class TestCompaction extends HBas
 
     // see if CompactionProgress has done its thing on at least one store
     int storeCount = 0;
-    for (HStore store : this.r.stores.values()) {
+    for (Store store : this.r.stores.values()) {
       CompactionProgress progress = store.getCompactionProgress();
       if( progress != null ) {
         ++storeCount;
@@ -281,10 +281,10 @@ public class TestCompaction extends HBas
     // Multiple versions allowed for an entry, so the delete isn't enough
     // Lower TTL and expire to ensure that all our entries have been wiped
     final int ttl = 1000;
-    for (HStore hstore : this.r.stores.values()) {
-      Store store = ((Store) hstore);
-      Store.ScanInfo old = store.scanInfo;
-      Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
+    for (Store hstore : this.r.stores.values()) {
+      HStore store = ((HStore) hstore);
+      HStore.ScanInfo old = store.scanInfo;
+      HStore.ScanInfo si = new HStore.ScanInfo(old.getFamily(),
           old.getMinVersions(), old.getMaxVersions(), ttl,
           old.getKeepDeletedCells(), 0, old.getComparator());
       store.scanInfo = si;
@@ -303,7 +303,7 @@ public class TestCompaction extends HBas
     conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
     conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);
 
-    Store s = ((Store) r.getStore(COLUMN_FAMILY));
+    HStore s = ((HStore) r.getStore(COLUMN_FAMILY));
     try {
       createStoreFile(r);
       createStoreFile(r);
@@ -436,7 +436,7 @@ public class TestCompaction extends HBas
     assertEquals(compactionThreshold, result.size());
 
     // do a compaction
-    HStore store2 = this.r.stores.get(fam2);
+    Store store2 = this.r.stores.get(fam2);
     int numFiles1 = store2.getStorefiles().size();
     assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
     store2.compactRecentForTesting(compactionThreshold);   // = 3
@@ -482,8 +482,8 @@ public class TestCompaction extends HBas
     assertEquals(0, count());
 
     // lower the polling interval for this test
-    int origWI = Store.closeCheckInterval;
-    Store.closeCheckInterval = 10*1000; // 10 KB
+    int origWI = HStore.closeCheckInterval;
+    HStore.closeCheckInterval = 10*1000; // 10 KB
 
     try {
       // Create a couple store files w/ 15KB (over 10KB interval)
@@ -513,7 +513,7 @@ public class TestCompaction extends HBas
       spyR.compactStores();
 
       // ensure that the compaction stopped, all old files are intact,
-      HStore s = r.stores.get(COLUMN_FAMILY);
+      Store s = r.stores.get(COLUMN_FAMILY);
       assertEquals(compactionThreshold, s.getStorefilesCount());
       assertTrue(s.getStorefilesSize() > 15*1000);
       // and no new store files persisted past compactStores()
@@ -523,7 +523,7 @@ public class TestCompaction extends HBas
     } finally {
       // don't mess up future tests
       r.writestate.writesEnabled = true;
-      Store.closeCheckInterval = origWI;
+      HStore.closeCheckInterval = origWI;
 
       // Delete all Store information once done using
       for (int i = 0; i < compactionThreshold; i++) {
@@ -537,10 +537,10 @@ public class TestCompaction extends HBas
       // Multiple versions allowed for an entry, so the delete isn't enough
       // Lower TTL and expire to ensure that all our entries have been wiped
       final int ttl = 1000;
-      for (HStore hstore: this.r.stores.values()) {
-        Store store = (Store)hstore;
-        Store.ScanInfo old = store.scanInfo;
-        Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
+      for (Store hstore: this.r.stores.values()) {
+        HStore store = (HStore)hstore;
+        HStore.ScanInfo old = store.scanInfo;
+        HStore.ScanInfo si = new HStore.ScanInfo(old.getFamily(),
             old.getMinVersions(), old.getMaxVersions(), ttl,
             old.getKeepDeletedCells(), 0, old.getComparator());
         store.scanInfo = si;
@@ -585,7 +585,7 @@ public class TestCompaction extends HBas
     for (int i = 0; i < nfiles; i++) {
       createStoreFile(r);
     }
-    Store store = (Store) r.getStore(COLUMN_FAMILY);
+    HStore store = (HStore) r.getStore(COLUMN_FAMILY);
 
     List<StoreFile> storeFiles = store.getStorefiles();
     long maxId = StoreFile.getMaxSequenceIdInList(storeFiles);
@@ -623,14 +623,14 @@ public class TestCompaction extends HBas
    * Test for HBASE-5920 - Test user requested major compactions always occurring
    */
   public void testNonUserMajorCompactionRequest() throws Exception {
-    HStore store = r.getStore(COLUMN_FAMILY);
+    Store store = r.getStore(COLUMN_FAMILY);
     createStoreFile(r);
     for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
       createStoreFile(r);
     }
     store.triggerMajorCompaction();
 
-    CompactionRequest request = store.requestCompaction(HStore.NO_PRIORITY);
+    CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY);
     assertNotNull("Expected to receive a compaction request", request);
     assertEquals(
       "System-requested major compaction should not occur if there are too many store files",
@@ -642,13 +642,13 @@ public class TestCompaction extends HBas
    * Test for HBASE-5920
    */
   public void testUserMajorCompactionRequest() throws IOException{
-    HStore store = r.getStore(COLUMN_FAMILY);
+    Store store = r.getStore(COLUMN_FAMILY);
     createStoreFile(r);
     for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
       createStoreFile(r);
     }
     store.triggerMajorCompaction();
-    CompactionRequest request = store.requestCompaction(HStore.PRIORITY_USER);
+    CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER);
     assertNotNull("Expected to receive a compaction request", request);
     assertEquals(
       "User-requested major compaction should always occur, even if there are too many store files",

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java Fri Aug 17 16:46:07 2012
@@ -236,7 +236,7 @@ public class TestHRegion extends HBaseTe
       MonitoredTask status = TaskMonitor.get().createStatus(method);
       Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
           Bytes.BYTES_COMPARATOR);
-      for (HStore store : region.getStores().values()) {
+      for (Store store : region.getStores().values()) {
         maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
             minSeqId - 1);
       }
@@ -288,7 +288,7 @@ public class TestHRegion extends HBaseTe
       MonitoredTask status = TaskMonitor.get().createStatus(method);
       Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
           Bytes.BYTES_COMPARATOR);
-      for (HStore store : region.getStores().values()) {
+      for (Store store : region.getStores().values()) {
         maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
             recoverSeqId - 1);
       }
@@ -336,7 +336,7 @@ public class TestHRegion extends HBaseTe
       
       Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
         Bytes.BYTES_COMPARATOR);
-      for (HStore store : region.getStores().values()) {
+      for (Store store : region.getStores().values()) {
         maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId);
       }
       long seqId = region.replayRecoveredEditsIfAny(regiondir,
@@ -864,7 +864,7 @@ public class TestHRegion extends HBaseTe
       put.add(kv);
 
       //checkAndPut with wrong value
-      Store store = (Store) region.getStore(fam1);
+      HStore store = (HStore) region.getStore(fam1);
       store.memstore.kvset.size();
 
       boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
@@ -1379,10 +1379,10 @@ public class TestHRegion extends HBaseTe
       // extract the key values out the memstore:
       // This is kinda hacky, but better than nothing...
       long now = System.currentTimeMillis();
-      KeyValue firstKv = ((Store) region.getStore(fam1)).memstore.kvset.first();
+      KeyValue firstKv = ((HStore) region.getStore(fam1)).memstore.kvset.first();
       assertTrue(firstKv.getTimestamp() <= now);
       now = firstKv.getTimestamp();
-      for (KeyValue kv : ((Store) region.getStore(fam1)).memstore.kvset) {
+      for (KeyValue kv : ((HStore) region.getStore(fam1)).memstore.kvset) {
         assertTrue(kv.getTimestamp() <= now);
         now = kv.getTimestamp();
       }
@@ -2320,7 +2320,7 @@ public class TestHRegion extends HBaseTe
 
       assertEquals(value+amount, result);
 
-      Store store = (Store) region.getStore(fam1);
+      HStore store = (HStore) region.getStore(fam1);
       // ICV removes any extra values floating around in there.
       assertEquals(1, store.memstore.kvset.size());
       assertTrue(store.memstore.snapshot.isEmpty());
@@ -2346,7 +2346,7 @@ public class TestHRegion extends HBaseTe
       region.put(put);
 
       // get the store in question:
-      Store s = (Store) region.getStore(fam1);
+      HStore s = (HStore) region.getStore(fam1);
       s.snapshot(); //bam
 
       // now increment:
@@ -2490,7 +2490,7 @@ public class TestHRegion extends HBaseTe
       // flush to disk.
       region.flushcache();
 
-      Store store = (Store) region.getStore(fam1);
+      HStore store = (HStore) region.getStore(fam1);
       assertEquals(0, store.memstore.kvset.size());
 
       long r = region.incrementColumnValue(row, fam1, qual1, amount, true);
@@ -2516,7 +2516,7 @@ public class TestHRegion extends HBaseTe
       region.put(put);
       region.flushcache();
 
-      Store store = (Store) region.getStore(fam1);
+      HStore store = (HStore) region.getStore(fam1);
       assertEquals(0, store.memstore.kvset.size());
 
       long r = region.incrementColumnValue(row, fam1, qual3, amount, true);
@@ -2562,7 +2562,7 @@ public class TestHRegion extends HBaseTe
 
       assertEquals(value+amount, result);
 
-      Store store = (Store) region.getStore(fam1);
+      HStore store = (HStore) region.getStore(fam1);
       // ICV should update the existing Put with the same timestamp
       assertEquals(1, store.memstore.kvset.size());
       assertTrue(store.memstore.snapshot.isEmpty());
@@ -2578,7 +2578,7 @@ public class TestHRegion extends HBaseTe
 
       assertEquals(value+amount, result);
 
-      store = (Store) region.getStore(fam1);
+      store = (HStore) region.getStore(fam1);
       // ICV should update the existing Put with the same timestamp
       assertEquals(2, store.memstore.kvset.size());
       assertTrue(store.memstore.snapshot.isEmpty());
@@ -3397,7 +3397,7 @@ public class TestHRegion extends HBaseTe
         region.flushcache();
       }
       //before compaction
-      Store store = (Store) region.getStore(fam1);
+      HStore store = (HStore) region.getStore(fam1);
       List<StoreFile> storeFiles = store.getStorefiles();
       for (StoreFile storefile : storeFiles) {
         StoreFile.Reader reader = storefile.getReader();

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java Fri Aug 17 16:46:07 2012
@@ -35,7 +35,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
+import org.apache.hadoop.hbase.regionserver.HStore.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.util.Bytes;
 

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java Fri Aug 17 16:46:07 2012
@@ -99,7 +99,7 @@ public class TestQueryMatcher extends HB
 
     // 2,4,5
     
-    ScanQueryMatcher qm = new ScanQueryMatcher(scan, new Store.ScanInfo(fam2,
+    ScanQueryMatcher qm = new ScanQueryMatcher(scan, new HStore.ScanInfo(fam2,
         0, 1, ttl, false, 0, rowComparator), get.getFamilyMap().get(fam2),
         EnvironmentEdgeManager.currentTimeMillis() - ttl);
 
@@ -144,7 +144,7 @@ public class TestQueryMatcher extends HB
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
     expected.add(ScanQueryMatcher.MatchCode.DONE);
 
-    ScanQueryMatcher qm = new ScanQueryMatcher(scan, new Store.ScanInfo(fam2,
+    ScanQueryMatcher qm = new ScanQueryMatcher(scan, new HStore.ScanInfo(fam2,
         0, 1, ttl, false, 0, rowComparator), null,
         EnvironmentEdgeManager.currentTimeMillis() - ttl);
 
@@ -197,7 +197,7 @@ public class TestQueryMatcher extends HB
     };
 
     long now = EnvironmentEdgeManager.currentTimeMillis();
-    ScanQueryMatcher qm = new ScanQueryMatcher(scan, new Store.ScanInfo(fam2,
+    ScanQueryMatcher qm = new ScanQueryMatcher(scan, new HStore.ScanInfo(fam2,
         0, 1, testTTL, false, 0, rowComparator), get.getFamilyMap().get(fam2),
         now - testTTL);
 
@@ -250,7 +250,7 @@ public class TestQueryMatcher extends HB
     };
 
     long now = EnvironmentEdgeManager.currentTimeMillis();
-    ScanQueryMatcher qm = new ScanQueryMatcher(scan, new Store.ScanInfo(fam2,
+    ScanQueryMatcher qm = new ScanQueryMatcher(scan, new HStore.ScanInfo(fam2,
         0, 1, testTTL, false, 0, rowComparator), null,
         now - testTTL);
 

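[Editorial note] The nested ScanInfo class moves with its enclosing class, so Store.ScanInfo becomes HStore.ScanInfo with an unchanged argument list. A sketch mirroring the second hunk above (null column set); fam2, ttl, and rowComparator are assumed from the test fixture, and the comments labeling positional arguments follow the convention the tests themselves use:

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.regionserver.HStore;
    import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    ScanQueryMatcher qm = new ScanQueryMatcher(new Scan(),
        new HStore.ScanInfo(fam2,
            0     /* minVersions */,
            1     /* maxVersions */,
            ttl,
            false /* keepDeletedCells */,
            0     /* timeToPurgeDeletes */,
            rowComparator),
        null  /* no explicit column set: match the whole family */,
        EnvironmentEdgeManager.currentTimeMillis() - ttl);
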
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java Fri Aug 17 16:46:07 2012
@@ -240,7 +240,7 @@ public class TestRegionServerMetrics {
     rs.doMetrics();
     for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
         Bytes.toBytes(TABLE_NAME))) {
-      for (Map.Entry<byte[], HStore> storeEntry : r.getStores().entrySet()) {
+      for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
         LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
             Bytes.toStringBinary(storeEntry.getKey()) + " found store files " +
             ": " + storeEntry.getValue().getStorefiles());

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java Fri Aug 17 16:46:07 2012
@@ -45,7 +45,7 @@ public class TestRegionSplitPolicy {
   private Configuration conf;
   private HTableDescriptor htd;
   private HRegion mockRegion;
-  private TreeMap<byte[], Store> stores;
+  private TreeMap<byte[], HStore> stores;
   private static final byte [] TABLENAME = new byte [] {'t'};
 
   @Before
@@ -57,7 +57,7 @@ public class TestRegionSplitPolicy {
     Mockito.doReturn(htd).when(mockRegion).getTableDesc();
     Mockito.doReturn(hri).when(mockRegion).getRegionInfo();
 
-    stores = new TreeMap<byte[], Store>(Bytes.BYTES_COMPARATOR);
+    stores = new TreeMap<byte[], HStore>(Bytes.BYTES_COMPARATOR);
     Mockito.doReturn(stores).when(mockRegion).getStores();
   }
 
@@ -90,7 +90,7 @@ public class TestRegionSplitPolicy {
     // Add a store in excess of split size.  Because there are "no regions"
     // on this server -- rss.getOnlineRegions is 0 -- then we should split
     // like a constantsizeregionsplitpolicy would
-    Store mockStore = Mockito.mock(Store.class);
+    HStore mockStore = Mockito.mock(HStore.class);
     Mockito.doReturn(2000L).when(mockStore).getSize();
     Mockito.doReturn(true).when(mockStore).canSplit();
     stores.put(new byte[]{1}, mockStore);
@@ -152,7 +152,7 @@ public class TestRegionSplitPolicy {
     Mockito.doReturn(myHtd).when(myMockRegion).getTableDesc();
     Mockito.doReturn(stores).when(myMockRegion).getStores();
 
-    Store mockStore = Mockito.mock(Store.class);
+    HStore mockStore = Mockito.mock(HStore.class);
     Mockito.doReturn(2000L).when(mockStore).getSize();
     Mockito.doReturn(true).when(mockStore).canSplit();
     Mockito.doReturn(Bytes.toBytes("abcd")).when(mockStore).getSplitPoint();
@@ -190,7 +190,7 @@ public class TestRegionSplitPolicy {
     assertFalse(policy.shouldSplit());
 
     // Add a store above the requisite size. Should split.
-    Store mockStore = Mockito.mock(Store.class);
+    HStore mockStore = Mockito.mock(HStore.class);
     Mockito.doReturn(2000L).when(mockStore).getSize();
     Mockito.doReturn(true).when(mockStore).canSplit();
     stores.put(new byte[]{1}, mockStore);
@@ -228,7 +228,7 @@ public class TestRegionSplitPolicy {
     assertNull(policy.getSplitPoint());
 
     // Add a store above the requisite size. Should split.
-    Store mockStore = Mockito.mock(Store.class);
+    HStore mockStore = Mockito.mock(HStore.class);
     Mockito.doReturn(2000L).when(mockStore).getSize();
     Mockito.doReturn(true).when(mockStore).canSplit();
     Mockito.doReturn(Bytes.toBytes("store 1 split"))
@@ -239,7 +239,7 @@ public class TestRegionSplitPolicy {
         Bytes.toString(policy.getSplitPoint()));
 
     // Add a bigger store. The split point should come from that one
-    Store mockStore2 = Mockito.mock(Store.class);
+    HStore mockStore2 = Mockito.mock(HStore.class);
     Mockito.doReturn(4000L).when(mockStore2).getSize();
     Mockito.doReturn(true).when(mockStore2).canSplit();
     Mockito.doReturn(Bytes.toBytes("store 2 split"))

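[Editorial note] TestRegionSplitPolicy builds its map of mocked HStore instances and stubs mockRegion.getStores() to return it. A condensed sketch of the mock wiring after the rename, with mockRegion assumed from the test's setUp():

    import java.util.TreeMap;
    import org.apache.hadoop.hbase.regionserver.HStore;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.mockito.Mockito;

    TreeMap<byte[], HStore> stores =
        new TreeMap<byte[], HStore>(Bytes.BYTES_COMPARATOR);
    Mockito.doReturn(stores).when(mockRegion).getStores();

    // A store big enough to trip the split check, with a canned split point.
    HStore mockStore = Mockito.mock(HStore.class);
    Mockito.doReturn(2000L).when(mockStore).getSize();
    Mockito.doReturn(true).when(mockStore).canSplit();
    Mockito.doReturn(Bytes.toBytes("abcd")).when(mockStore).getSplitPoint();
    stores.put(new byte[]{1}, mockStore);
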
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java Fri Aug 17 16:46:07 2012
@@ -148,7 +148,7 @@ public class TestSplitTransaction {
     when(storeFileMock.isReference()).thenReturn(true);
 
     // add the mock to the parent stores
-    Store storeMock = Mockito.mock(Store.class);
+    HStore storeMock = Mockito.mock(HStore.class);
     List<StoreFile> storeFileList = new ArrayList<StoreFile>(1);
     storeFileList.add(storeFileMock);
     when(storeMock.getStorefiles()).thenReturn(storeFileList);

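[Editorial note] The same Mockito pattern appears in TestSplitTransaction, where the parent region is handed a mocked HStore whose only file claims to be a reference, so the transaction must refuse to split again. Sketch, with storeFileMock assumed from the surrounding test:

    import java.util.ArrayList;
    import java.util.List;
    import static org.mockito.Mockito.*;

    HStore storeMock = mock(HStore.class);
    List<StoreFile> storeFileList = new ArrayList<StoreFile>(1);
    storeFileList.add(storeFileMock);  // storeFileMock.isReference() returns true
    when(storeMock.getStorefiles()).thenReturn(storeFileList);
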
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java Fri Aug 17 16:46:07 2012
@@ -74,7 +74,7 @@ import com.google.common.base.Joiner;
 public class TestStore extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestStore.class);
 
-  Store store;
+  HStore store;
   byte [] table = Bytes.toBytes("table");
   byte [] family = Bytes.toBytes("family");
 
@@ -147,7 +147,7 @@ public class TestStore extends TestCase 
     HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
     HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
 
-    store = new Store(basedir, region, hcd, fs, conf);
+    store = new HStore(basedir, region, hcd, fs, conf);
   }
 
   public void testDeleteExpiredStoreFiles() throws Exception {
@@ -216,14 +216,14 @@ public class TestStore extends TestCase 
     }
     // after flush; check the lowest time stamp
     long lowestTimeStampFromStore = 
-        Store.getLowestTimestamp(store.getStorefiles());
+        HStore.getLowestTimestamp(store.getStorefiles());
     long lowestTimeStampFromFS = 
       getLowestTimeStampFromFS(fs,store.getStorefiles());
     assertEquals(lowestTimeStampFromStore,lowestTimeStampFromFS);
     
     // after compact; check the lowest time stamp
     store.compact(store.requestCompaction());
-    lowestTimeStampFromStore = Store.getLowestTimestamp(store.getStorefiles());
+    lowestTimeStampFromStore = HStore.getLowestTimestamp(store.getStorefiles());
     lowestTimeStampFromFS = getLowestTimeStampFromFS(fs,store.getStorefiles());
     assertEquals(lowestTimeStampFromStore,lowestTimeStampFromFS); 
   }
@@ -278,7 +278,7 @@ public class TestStore extends TestCase 
     w.close();
     this.store.close();
     // Reopen it... should pick up two files
-    this.store = new Store(storedir.getParent().getParent(),
+    this.store = new HStore(storedir.getParent().getParent(),
       this.store.getHRegion(),
       this.store.getFamily(), fs, c);
     System.out.println(this.store.getHRegionInfo().getEncodedName());
@@ -688,7 +688,7 @@ public class TestStore extends TestCase 
 
 
 
-  private static void flushStore(Store store, long id) throws IOException {
+  private static void flushStore(HStore store, long id) throws IOException {
     StoreFlusher storeFlusher = store.getStoreFlusher(id);
     storeFlusher.prepare();
     storeFlusher.flushCache(Mockito.mock(MonitoredTask.class));

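[Editorial note] In TestStore both the direct construction and the static timestamp helper pick up the new name, while the flushStore helper is only retyped. A condensed sketch of the resulting flow; basedir, region, hcd, fs, conf, and the test's getLowestTimeStampFromFS helper are assumed from the fixture:

    HStore store = new HStore(basedir, region, hcd, fs, conf);
    // ... write some cells, then flush ...
    long fromStore = HStore.getLowestTimestamp(store.getStorefiles());
    long fromFS = getLowestTimeStampFromFS(fs, store.getStorefiles());
    assertEquals(fromStore, fromFS);

    private static void flushStore(HStore store, long id) throws IOException {
      StoreFlusher storeFlusher = store.getStoreFlusher(id);
      storeFlusher.prepare();
      storeFlusher.flushCache(Mockito.mock(MonitoredTask.class));
      // remainder of the helper is unchanged and not shown in the hunk above
    }
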
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java Fri Aug 17 16:46:07 2012
@@ -178,14 +178,14 @@ public class TestStoreFile extends HBase
     KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
     byte [] midRow = midKV.getRow();
     // Create top split.
-    Path topDir = Store.getStoreHomedir(this.testDir, "1",
+    Path topDir = HStore.getStoreHomedir(this.testDir, "1",
       Bytes.toBytes(f.getPath().getParent().getName()));
     if (this.fs.exists(topDir)) {
       this.fs.delete(topDir, true);
     }
     Path topPath = StoreFile.split(this.fs, topDir, f, midRow, true);
     // Create bottom split.
-    Path bottomDir = Store.getStoreHomedir(this.testDir, "2",
+    Path bottomDir = HStore.getStoreHomedir(this.testDir, "2",
       Bytes.toBytes(f.getPath().getParent().getName()));
     if (this.fs.exists(bottomDir)) {
       this.fs.delete(bottomDir, true);

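[Editorial note] getStoreHomedir is a static helper, so its call sites switch to the new class name without other changes. A sketch of the split set-up it serves, with testDir, fs, f (the parent store file's path handle) and midRow assumed from the test; the boolean selects the half, and the false case for the bottom half is inferred from the true case shown above:

    Path topDir = HStore.getStoreHomedir(this.testDir, "1",
        Bytes.toBytes(f.getPath().getParent().getName()));
    Path topPath = StoreFile.split(this.fs, topDir, f, midRow, true /* top half */);

    Path bottomDir = HStore.getStoreHomedir(this.testDir, "2",
        Bytes.toBytes(f.getPath().getParent().getName()));
    Path bottomPath = StoreFile.split(this.fs, bottomDir, f, midRow,
        false /* bottom half, inferred */);
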
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java Fri Aug 17 16:46:07 2012
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.Store.ScanInfo;
+import org.apache.hadoop.hbase.regionserver.HStore.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
@@ -550,7 +550,7 @@ public class TestStoreScanner extends Te
       List<KeyValueScanner> scanners = scanFixture(kvs);
       Scan scan = new Scan();
       scan.setMaxVersions(2);
-      Store.ScanInfo scanInfo = new Store.ScanInfo(Bytes.toBytes("cf"),
+      HStore.ScanInfo scanInfo = new HStore.ScanInfo(Bytes.toBytes("cf"),
         0 /* minVersions */,
         2 /* maxVersions */, 500 /* ttl */,
         false /* keepDeletedCells */,

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java Fri Aug 17 16:46:07 2012
@@ -48,8 +48,8 @@ import org.apache.hadoop.hbase.monitorin
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
@@ -182,7 +182,7 @@ public class TestWALReplay {
     // flush region and make major compaction
     destServer.getOnlineRegion(destRegion.getRegionName()).flushcache();
     // wait to complete major compaction
-    for (HStore store : destServer.getOnlineRegion(destRegion.getRegionName())
+    for (Store store : destServer.getOnlineRegion(destRegion.getRegionName())
         .getStores().values()) {
       store.triggerMajorCompaction();
     }
@@ -422,7 +422,7 @@ public class TestWALReplay {
         final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
         HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
           @Override
-          protected boolean restoreEdit(HStore s, KeyValue kv) {
+          protected boolean restoreEdit(Store s, KeyValue kv) {
             boolean b = super.restoreEdit(s, kv);
             countOfRestoredEdits.incrementAndGet();
             return b;

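[Editorial note] TestWALReplay moves in the opposite direction: code that only needs the public surface (triggerMajorCompaction, restoreEdit) is retyped from HStore to the Store interface. Assembled from the hunks above, with the fixture objects assumed:

    // iterate the region's stores through the interface
    for (Store store : destServer.getOnlineRegion(destRegion.getRegionName())
        .getStores().values()) {
      store.triggerMajorCompaction();
    }

    // count replayed edits by overriding the interface-typed hook
    final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
    HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
      @Override
      protected boolean restoreEdit(Store s, KeyValue kv) {
        boolean b = super.restoreEdit(s, kv);
        countOfRestoredEdits.incrementAndGet();
        return b;
      }
    };
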
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java Fri Aug 17 16:46:07 2012
@@ -34,8 +34,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 
 /**
  * Test helper for testing archiving of HFiles
@@ -222,7 +222,7 @@ public class HFileArchiveTestingUtil {
    * @param store store that is archiving files
    * @return {@link Path} to the store archive directory for the given region
    */
-  public static Path getStoreArchivePath(Configuration conf, HRegion region, HStore store) {
+  public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store) {
     return HFileArchiveUtil.getStoreArchivePath(conf, region, store.getFamily().getName());
   }
 
@@ -234,7 +234,7 @@ public class HFileArchiveTestingUtil {
     HRegion region = servingRegions.get(0);
 
     // check that we actually have some store files that were archived
-    HStore store = region.getStore(storeName);
+    Store store = region.getStore(storeName);
     return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
   }
 }

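[Editorial note] HFileArchiveTestingUtil likewise narrows its parameter to the interface, which lets the handle from HRegion.getStore(byte[]) flow straight through (region, storeName, and util assumed from the helper's callers):

    Store store = region.getStore(storeName);
    Path archiveDir = HFileArchiveTestingUtil.getStoreArchivePath(
        util.getConfiguration(), region, store);
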
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java?rev=1374354&r1=1374353&r2=1374354&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCoprocessorScanPolicy.java Fri Aug 17 16:46:07 2012
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.coprocess
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
-import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreScanner;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -203,15 +203,15 @@ public class TestCoprocessorScanPolicy {
 
     @Override
     public InternalScanner preFlushScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-        Store store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
+        HStore store, KeyValueScanner memstoreScanner, InternalScanner s) throws IOException {
       Long newTtl = ttls.get(store.getTableName());
       if (newTtl != null) {
         System.out.println("PreFlush:" + newTtl);
       }
       Integer newVersions = versions.get(store.getTableName());
-      Store.ScanInfo oldSI = store.getScanInfo();
+      HStore.ScanInfo oldSI = store.getScanInfo();
       HColumnDescriptor family = store.getFamily();
-      Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
+      HStore.ScanInfo scanInfo = new HStore.ScanInfo(family.getName(), family.getMinVersions(),
           newVersions == null ? family.getMaxVersions() : newVersions,
           newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
           oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
@@ -224,13 +224,13 @@ public class TestCoprocessorScanPolicy {
 
     @Override
     public InternalScanner preCompactScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
-        Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
+        HStore store, List<? extends KeyValueScanner> scanners, ScanType scanType,
         long earliestPutTs, InternalScanner s) throws IOException {
       Long newTtl = ttls.get(store.getTableName());
       Integer newVersions = versions.get(store.getTableName());
-      Store.ScanInfo oldSI = store.getScanInfo();
+      HStore.ScanInfo oldSI = store.getScanInfo();
       HColumnDescriptor family = store.getFamily();
-      Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
+      HStore.ScanInfo scanInfo = new HStore.ScanInfo(family.getName(), family.getMinVersions(),
           newVersions == null ? family.getMaxVersions() : newVersions,
           newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
           oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
@@ -242,13 +242,13 @@ public class TestCoprocessorScanPolicy {
 
     @Override
     public KeyValueScanner preStoreScannerOpen(
-        final ObserverContext<RegionCoprocessorEnvironment> c, Store store, final Scan scan,
+        final ObserverContext<RegionCoprocessorEnvironment> c, HStore store, final Scan scan,
         final NavigableSet<byte[]> targetCols, KeyValueScanner s) throws IOException {
       Long newTtl = ttls.get(store.getTableName());
       Integer newVersions = versions.get(store.getTableName());
-      Store.ScanInfo oldSI = store.getScanInfo();
+      HStore.ScanInfo oldSI = store.getScanInfo();
       HColumnDescriptor family = store.getFamily();
-      Store.ScanInfo scanInfo = new Store.ScanInfo(family.getName(), family.getMinVersions(),
+      HStore.ScanInfo scanInfo = new HStore.ScanInfo(family.getName(), family.getMinVersions(),
           newVersions == null ? family.getMaxVersions() : newVersions,
           newTtl == null ? oldSI.getTtl() : newTtl, family.getKeepDeletedCells(),
           oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
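
[Editorial note] The three RegionObserver hooks in TestCoprocessorScanPolicy share one shape: take the now HStore-typed store, rebuild its ScanInfo with per-table TTL/version overrides, and open a scanner over it. A condensed sketch of the scanner-open hook after the patch; the ttls and versions maps come from the surrounding observer, and the closing return line is an assumption since the hunk above ends before it:

    @Override
    public KeyValueScanner preStoreScannerOpen(
        final ObserverContext<RegionCoprocessorEnvironment> c, HStore store,
        final Scan scan, final NavigableSet<byte[]> targetCols, KeyValueScanner s)
        throws IOException {
      Long newTtl = ttls.get(store.getTableName());
      Integer newVersions = versions.get(store.getTableName());
      HStore.ScanInfo oldSI = store.getScanInfo();
      HColumnDescriptor family = store.getFamily();
      // Substitute the per-table overrides, falling back to the family's
      // schema and the store's existing scan metadata.
      HStore.ScanInfo scanInfo = new HStore.ScanInfo(family.getName(),
          family.getMinVersions(),
          newVersions == null ? family.getMaxVersions() : newVersions,
          newTtl == null ? oldSI.getTtl() : newTtl,
          family.getKeepDeletedCells(),
          oldSI.getTimeToPurgeDeletes(), oldSI.getComparator());
      return new StoreScanner(store, scanInfo, scan, targetCols); // assumed constructor
    }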