You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by bu...@apache.org on 2016/10/17 16:54:43 UTC

[4/6] hbase git commit: HBASE-16847 Commented out broken test-compile references. These will be fixed and put back in later.

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 08f0470..c9b4217 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -676,445 +676,445 @@ public class TestHRegion {
     scanner1.close();
   }
 
-  @Test
-  public void testSkipRecoveredEditsReplay() throws Exception {
-    String method = "testSkipRecoveredEditsReplay";
-    TableName tableName = TableName.valueOf(method);
-    byte[] family = Bytes.toBytes("family");
-    this.region = initHRegion(tableName, method, CONF, family);
-    final WALFactory wals = new WALFactory(CONF, null, method);
-    try {
-      Path regiondir = region.getRegionStorage().getRegionDir();
-      FileSystem fs = region.getRegionStorage().getFileSystem();
-      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
-      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
-
-      long maxSeqId = 1050;
-      long minSeqId = 1000;
-
-      for (long i = minSeqId; i <= maxSeqId; i += 10) {
-        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
-        fs.create(recoveredEdits);
-        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
-
-        long time = System.nanoTime();
-        WALEdit edit = new WALEdit();
-        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
-            .toBytes(i)));
-        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
-            HConstants.DEFAULT_CLUSTER_ID), edit));
-
-        writer.close();
-      }
-      MonitoredTask status = TaskMonitor.get().createStatus(method);
-      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
-      for (Store store : region.getStores()) {
-        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
-      }
-      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
-      assertEquals(maxSeqId, seqId);
-      region.getMVCC().advanceTo(seqId);
-      Get get = new Get(row);
-      Result result = region.get(get);
-      for (long i = minSeqId; i <= maxSeqId; i += 10) {
-        List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
-        assertEquals(1, kvs.size());
-        assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-      wals.close();
-    }
-  }
-
-  @Test
-  public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
-    String method = "testSkipRecoveredEditsReplaySomeIgnored";
-    TableName tableName = TableName.valueOf(method);
-    byte[] family = Bytes.toBytes("family");
-    this.region = initHRegion(tableName, method, CONF, family);
-    final WALFactory wals = new WALFactory(CONF, null, method);
-    try {
-      Path regiondir = region.getRegionStorage().getRegionDir();
-      FileSystem fs = region.getRegionStorage().getFileSystem();
-      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
-      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
-
-      long maxSeqId = 1050;
-      long minSeqId = 1000;
-
-      for (long i = minSeqId; i <= maxSeqId; i += 10) {
-        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
-        fs.create(recoveredEdits);
-        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
-
-        long time = System.nanoTime();
-        WALEdit edit = new WALEdit();
-        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
-            .toBytes(i)));
-        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
-            HConstants.DEFAULT_CLUSTER_ID), edit));
-
-        writer.close();
-      }
-      long recoverSeqId = 1030;
-      MonitoredTask status = TaskMonitor.get().createStatus(method);
-      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
-      for (Store store : region.getStores()) {
-        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
-      }
-      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
-      assertEquals(maxSeqId, seqId);
-      region.getMVCC().advanceTo(seqId);
-      Get get = new Get(row);
-      Result result = region.get(get);
-      for (long i = minSeqId; i <= maxSeqId; i += 10) {
-        List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
-        if (i < recoverSeqId) {
-          assertEquals(0, kvs.size());
-        } else {
-          assertEquals(1, kvs.size());
-          assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
-        }
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-      wals.close();
-    }
-  }
-
-  @Test
-  public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
-    byte[] family = Bytes.toBytes("family");
-    this.region = initHRegion(tableName, method, CONF, family);
-    try {
-      Path regiondir = region.getRegionStorage().getRegionDir();
-      FileSystem fs = region.getRegionStorage().getFileSystem();
-
-      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
-      for (int i = 1000; i < 1050; i += 10) {
-        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
-        FSDataOutputStream dos = fs.create(recoveredEdits);
-        dos.writeInt(i);
-        dos.close();
-      }
-      long minSeqId = 2000;
-      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", minSeqId - 1));
-      FSDataOutputStream dos = fs.create(recoveredEdits);
-      dos.close();
-
-      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
-      for (Store store : region.getStores()) {
-        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId);
-      }
-      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, null);
-      assertEquals(minSeqId, seqId);
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-    }
-  }
-
-  @Test
-  public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception {
-    String method = "testSkipRecoveredEditsReplayTheLastFileIgnored";
-    TableName tableName = TableName.valueOf(method);
-    byte[] family = Bytes.toBytes("family");
-    this.region = initHRegion(tableName, method, CONF, family);
-    final WALFactory wals = new WALFactory(CONF, null, method);
-    try {
-      Path regiondir = region.getRegionStorage().getRegionDir();
-      FileSystem fs = region.getRegionStorage().getFileSystem();
-      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-      byte[][] columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]);
-
-      assertEquals(0, region.getStoreFileList(columns).size());
-
-      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
-
-      long maxSeqId = 1050;
-      long minSeqId = 1000;
-
-      for (long i = minSeqId; i <= maxSeqId; i += 10) {
-        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
-        fs.create(recoveredEdits);
-        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
-
-        long time = System.nanoTime();
-        WALEdit edit = null;
-        if (i == maxSeqId) {
-          edit = WALEdit.createCompaction(region.getRegionInfo(),
-          CompactionDescriptor.newBuilder()
-          .setTableName(ByteString.copyFrom(tableName.getName()))
-          .setFamilyName(ByteString.copyFrom(regionName))
-          .setEncodedRegionName(ByteString.copyFrom(regionName))
-          .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString())))
-          .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName()))
-          .build());
-        } else {
-          edit = new WALEdit();
-          edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
-            .toBytes(i)));
-        }
-        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
-            HConstants.DEFAULT_CLUSTER_ID), edit));
-        writer.close();
-      }
-
-      long recoverSeqId = 1030;
-      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
-      MonitoredTask status = TaskMonitor.get().createStatus(method);
-      for (Store store : region.getStores()) {
-        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
-      }
-      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
-      assertEquals(maxSeqId, seqId);
-
-      // assert that the files are flushed
-      assertEquals(1, region.getStoreFileList(columns).size());
-
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-      wals.close();
-    }
-  }
-
-  @Test
-  public void testRecoveredEditsReplayCompaction() throws Exception {
-    testRecoveredEditsReplayCompaction(false);
-    testRecoveredEditsReplayCompaction(true);
-  }
-  public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception {
-    String method = name.getMethodName();
-    TableName tableName = TableName.valueOf(method);
-    byte[] family = Bytes.toBytes("family");
-    this.region = initHRegion(tableName, method, CONF, family);
-    final WALFactory wals = new WALFactory(CONF, null, method);
-    try {
-      Path regiondir = region.getRegionStorage().getRegionDir();
-      FileSystem fs = region.getRegionStorage().getFileSystem();
-      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
-      long maxSeqId = 3;
-      long minSeqId = 0;
-
-      for (long i = minSeqId; i < maxSeqId; i++) {
-        Put put = new Put(Bytes.toBytes(i));
-        put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
-        region.put(put);
-        region.flush(true);
-      }
-
-      // this will create a region with 3 files
-      assertEquals(3, region.getStore(family).getStorefilesCount());
-      List<Path> storeFiles = new ArrayList<Path>(3);
-      for (StoreFile sf : region.getStore(family).getStorefiles()) {
-        storeFiles.add(sf.getPath());
-      }
-
-      // disable compaction completion
-      CONF.setBoolean("hbase.hstore.compaction.complete", false);
-      region.compactStores();
-
-      // ensure that nothing changed
-      assertEquals(3, region.getStore(family).getStorefilesCount());
-
-      // now find the compacted file, and manually add it to the recovered edits
-      Path tmpDir = region.getRegionStorage().getTempDir();
-      FileStatus[] files = FSUtils.listStatus(fs, tmpDir);
-      String errorMsg = "Expected to find 1 file in the region temp directory "
-          + "from the compaction, could not find any";
-      assertNotNull(errorMsg, files);
-      assertEquals(errorMsg, 1, files.length);
-      // move the file inside region dir
-      Path newFile = region.getRegionStorage().commitStoreFile(Bytes.toString(family),
-          files[0].getPath());
-
-      byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes();
-      byte[] fakeEncodedNameAsBytes = new byte [encodedNameAsBytes.length];
-      for (int i=0; i < encodedNameAsBytes.length; i++) {
-        // Mix the byte array to have a new encodedName
-        fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1);
-      }
-
-      CompactionDescriptor compactionDescriptor = ServerProtobufUtil.toCompactionDescriptor(this.region
-        .getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family,
-            storeFiles, Lists.newArrayList(newFile),
-            region.getRegionStorage().getStoreDir(Bytes.toString(family)));
-
-      WALUtil.writeCompactionMarker(region.getWAL(), this.region.getReplicationScope(),
-          this.region.getRegionInfo(), compactionDescriptor, region.getMVCC());
-
-      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
-
-      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
-      fs.create(recoveredEdits);
-      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
-
-      long time = System.nanoTime();
-
-      writer.append(new WAL.Entry(new HLogKey(regionName, tableName, 10, time,
-          HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(),
-          compactionDescriptor)));
-      writer.close();
-
-      // close the region now, and reopen again
-      region.getTableDesc();
-      region.getRegionInfo();
-      region.close();
-      try {
-        region = HRegion.openHRegion(region, null);
-      } catch (WrongRegionException wre) {
-        fail("Matching encoded region name should not have produced WrongRegionException");
-      }
-
-      // now check whether we have only one store file, the compacted one
-      Collection<StoreFile> sfs = region.getStore(family).getStorefiles();
-      for (StoreFile sf : sfs) {
-        LOG.info(sf.getPath());
-      }
-      if (!mismatchedRegionName) {
-        assertEquals(1, region.getStore(family).getStorefilesCount());
-      }
-      files = FSUtils.listStatus(fs, tmpDir);
-      assertTrue("Expected to find 0 files inside " + tmpDir, files == null || files.length == 0);
-
-      for (long i = minSeqId; i < maxSeqId; i++) {
-        Get get = new Get(Bytes.toBytes(i));
-        Result result = region.get(get);
-        byte[] value = result.getValue(family, Bytes.toBytes(i));
-        assertArrayEquals(Bytes.toBytes(i), value);
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-      wals.close();
-    }
-  }
-
-  @Test
-  public void testFlushMarkers() throws Exception {
-    // tests that flush markers are written to WAL and handled at recovered edits
-    String method = name.getMethodName();
-    TableName tableName = TableName.valueOf(method);
-    byte[] family = Bytes.toBytes("family");
-    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
-    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
-    FSUtils.setRootDir(walConf, logDir);
-    final WALFactory wals = new WALFactory(walConf, null, method);
-    final WAL wal = wals.getWAL(tableName.getName(), tableName.getNamespace());
-
-    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
-      HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
-    try {
-      Path regiondir = region.getRegionStorage().getRegionDir();
-      FileSystem fs = region.getRegionStorage().getFileSystem();
-      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
-
-      long maxSeqId = 3;
-      long minSeqId = 0;
-
-      for (long i = minSeqId; i < maxSeqId; i++) {
-        Put put = new Put(Bytes.toBytes(i));
-        put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
-        region.put(put);
-        region.flush(true);
-      }
-
-      // this will create a region with 3 files from flush
-      assertEquals(3, region.getStore(family).getStorefilesCount());
-      List<String> storeFiles = new ArrayList<String>(3);
-      for (StoreFile sf : region.getStore(family).getStorefiles()) {
-        storeFiles.add(sf.getPath().getName());
-      }
-
-      // now verify that the flush markers are written
-      wal.shutdown();
-      WAL.Reader reader = WALFactory.createReader(fs, AbstractFSWALProvider.getCurrentFileName(wal),
-        TEST_UTIL.getConfiguration());
-      try {
-        List<WAL.Entry> flushDescriptors = new ArrayList<WAL.Entry>();
-        long lastFlushSeqId = -1;
-        while (true) {
-          WAL.Entry entry = reader.next();
-          if (entry == null) {
-            break;
-          }
-          Cell cell = entry.getEdit().getCells().get(0);
-          if (WALEdit.isMetaEditFamily(cell)) {
-            FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
-            assertNotNull(flushDesc);
-            assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
-            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
-              assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
-            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
-              assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
-            }
-            lastFlushSeqId = flushDesc.getFlushSequenceNumber();
-            assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
-            assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store
-            StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
-            assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
-            assertEquals("family", storeFlushDesc.getStoreHomeDir());
-            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
-              assertEquals(0, storeFlushDesc.getFlushOutputCount());
-            } else {
-              assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush
-              assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
-            }
-
-            flushDescriptors.add(entry);
-          }
-        }
-
-        assertEquals(3 * 2, flushDescriptors.size()); // START_FLUSH and COMMIT_FLUSH per flush
-
-        // now write those markers to the recovered edits again.
-
-        Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
-
-        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
-        fs.create(recoveredEdits);
-        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
-
-        for (WAL.Entry entry : flushDescriptors) {
-          writer.append(entry);
-        }
-        writer.close();
-      } finally {
-        if (null != reader) {
-          try {
-            reader.close();
-          } catch (IOException exception) {
-            LOG.warn("Problem closing wal: " + exception.getMessage());
-            LOG.debug("exception details", exception);
-          }
-        }
-      }
-
-
-      // close the region now, and reopen again
-      region.close();
-      region = HRegion.openHRegion(region, null);
-
-      // now check whether we have can read back the data from region
-      for (long i = minSeqId; i < maxSeqId; i++) {
-        Get get = new Get(Bytes.toBytes(i));
-        Result result = region.get(get);
-        byte[] value = result.getValue(family, Bytes.toBytes(i));
-        assertArrayEquals(Bytes.toBytes(i), value);
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-      wals.close();
-    }
-  }
+//  @Test
+//  public void testSkipRecoveredEditsReplay() throws Exception {
+//    String method = "testSkipRecoveredEditsReplay";
+//    TableName tableName = TableName.valueOf(method);
+//    byte[] family = Bytes.toBytes("family");
+//    this.region = initHRegion(tableName, method, CONF, family);
+//    final WALFactory wals = new WALFactory(CONF, null, method);
+//    try {
+//      Path regiondir = region.getRegionStorage().getRegionDir();
+//      FileSystem fs = region.getRegionStorage().getFileSystem();
+//      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+//
+//      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
+//
+//      long maxSeqId = 1050;
+//      long minSeqId = 1000;
+//
+//      for (long i = minSeqId; i <= maxSeqId; i += 10) {
+//        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+//        fs.create(recoveredEdits);
+//        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
+//
+//        long time = System.nanoTime();
+//        WALEdit edit = new WALEdit();
+//        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
+//            .toBytes(i)));
+//        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
+//            HConstants.DEFAULT_CLUSTER_ID), edit));
+//
+//        writer.close();
+//      }
+//      MonitoredTask status = TaskMonitor.get().createStatus(method);
+//      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+//      for (Store store : region.getStores()) {
+//        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
+//      }
+//      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
+//      assertEquals(maxSeqId, seqId);
+//      region.getMVCC().advanceTo(seqId);
+//      Get get = new Get(row);
+//      Result result = region.get(get);
+//      for (long i = minSeqId; i <= maxSeqId; i += 10) {
+//        List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
+//        assertEquals(1, kvs.size());
+//        assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
+//      }
+//    } finally {
+//      HBaseTestingUtility.closeRegionAndWAL(this.region);
+//      this.region = null;
+//      wals.close();
+//    }
+//  }
+
+//  @Test
+//  public void testSkipRecoveredEditsReplaySomeIgnored() throws Exception {
+//    String method = "testSkipRecoveredEditsReplaySomeIgnored";
+//    TableName tableName = TableName.valueOf(method);
+//    byte[] family = Bytes.toBytes("family");
+//    this.region = initHRegion(tableName, method, CONF, family);
+//    final WALFactory wals = new WALFactory(CONF, null, method);
+//    try {
+//      Path regiondir = region.getRegionStorage().getRegionDir();
+//      FileSystem fs = region.getRegionStorage().getFileSystem();
+//      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+//
+//      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
+//
+//      long maxSeqId = 1050;
+//      long minSeqId = 1000;
+//
+//      for (long i = minSeqId; i <= maxSeqId; i += 10) {
+//        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+//        fs.create(recoveredEdits);
+//        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
+//
+//        long time = System.nanoTime();
+//        WALEdit edit = new WALEdit();
+//        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
+//            .toBytes(i)));
+//        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
+//            HConstants.DEFAULT_CLUSTER_ID), edit));
+//
+//        writer.close();
+//      }
+//      long recoverSeqId = 1030;
+//      MonitoredTask status = TaskMonitor.get().createStatus(method);
+//      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+//      for (Store store : region.getStores()) {
+//        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
+//      }
+//      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
+//      assertEquals(maxSeqId, seqId);
+//      region.getMVCC().advanceTo(seqId);
+//      Get get = new Get(row);
+//      Result result = region.get(get);
+//      for (long i = minSeqId; i <= maxSeqId; i += 10) {
+//        List<Cell> kvs = result.getColumnCells(family, Bytes.toBytes(i));
+//        if (i < recoverSeqId) {
+//          assertEquals(0, kvs.size());
+//        } else {
+//          assertEquals(1, kvs.size());
+//          assertArrayEquals(Bytes.toBytes(i), CellUtil.cloneValue(kvs.get(0)));
+//        }
+//      }
+//    } finally {
+//      HBaseTestingUtility.closeRegionAndWAL(this.region);
+//      this.region = null;
+//      wals.close();
+//    }
+//  }
+
+//  @Test
+//  public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
+//    byte[] family = Bytes.toBytes("family");
+//    this.region = initHRegion(tableName, method, CONF, family);
+//    try {
+//      Path regiondir = region.getRegionStorage().getRegionDir();
+//      FileSystem fs = region.getRegionStorage().getFileSystem();
+//
+//      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
+//      for (int i = 1000; i < 1050; i += 10) {
+//        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+//        FSDataOutputStream dos = fs.create(recoveredEdits);
+//        dos.writeInt(i);
+//        dos.close();
+//      }
+//      long minSeqId = 2000;
+//      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", minSeqId - 1));
+//      FSDataOutputStream dos = fs.create(recoveredEdits);
+//      dos.close();
+//
+//      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+//      for (Store store : region.getStores()) {
+//        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId);
+//      }
+//      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, null);
+//      assertEquals(minSeqId, seqId);
+//    } finally {
+//      HBaseTestingUtility.closeRegionAndWAL(this.region);
+//      this.region = null;
+//    }
+//  }
+
+//  @Test
+//  public void testSkipRecoveredEditsReplayTheLastFileIgnored() throws Exception {
+//    String method = "testSkipRecoveredEditsReplayTheLastFileIgnored";
+//    TableName tableName = TableName.valueOf(method);
+//    byte[] family = Bytes.toBytes("family");
+//    this.region = initHRegion(tableName, method, CONF, family);
+//    final WALFactory wals = new WALFactory(CONF, null, method);
+//    try {
+//      Path regiondir = region.getRegionStorage().getRegionDir();
+//      FileSystem fs = region.getRegionStorage().getFileSystem();
+//      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+//      byte[][] columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]);
+//
+//      assertEquals(0, region.getStoreFileList(columns).size());
+//
+//      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
+//
+//      long maxSeqId = 1050;
+//      long minSeqId = 1000;
+//
+//      for (long i = minSeqId; i <= maxSeqId; i += 10) {
+//        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
+//        fs.create(recoveredEdits);
+//        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
+//
+//        long time = System.nanoTime();
+//        WALEdit edit = null;
+//        if (i == maxSeqId) {
+//          edit = WALEdit.createCompaction(region.getRegionInfo(),
+//          CompactionDescriptor.newBuilder()
+//          .setTableName(ByteString.copyFrom(tableName.getName()))
+//          .setFamilyName(ByteString.copyFrom(regionName))
+//          .setEncodedRegionName(ByteString.copyFrom(regionName))
+//          .setStoreHomeDirBytes(ByteString.copyFrom(Bytes.toBytes(regiondir.toString())))
+//          .setRegionName(ByteString.copyFrom(region.getRegionInfo().getRegionName()))
+//          .build());
+//        } else {
+//          edit = new WALEdit();
+//          edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
+//            .toBytes(i)));
+//        }
+//        writer.append(new WAL.Entry(new HLogKey(regionName, tableName, i, time,
+//            HConstants.DEFAULT_CLUSTER_ID), edit));
+//        writer.close();
+//      }
+//
+//      long recoverSeqId = 1030;
+//      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+//      MonitoredTask status = TaskMonitor.get().createStatus(method);
+//      for (Store store : region.getStores()) {
+//        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1);
+//      }
+//      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
+//      assertEquals(maxSeqId, seqId);
+//
+//      // assert that the files are flushed
+//      assertEquals(1, region.getStoreFileList(columns).size());
+//
+//    } finally {
+//      HBaseTestingUtility.closeRegionAndWAL(this.region);
+//      this.region = null;
+//      wals.close();
+//    }
+//  }
+
+//  @Test
+//  public void testRecoveredEditsReplayCompaction() throws Exception {
+//    testRecoveredEditsReplayCompaction(false);
+//    testRecoveredEditsReplayCompaction(true);
+//  }
+//  public void testRecoveredEditsReplayCompaction(boolean mismatchedRegionName) throws Exception {
+//    String method = name.getMethodName();
+//    TableName tableName = TableName.valueOf(method);
+//    byte[] family = Bytes.toBytes("family");
+//    this.region = initHRegion(tableName, method, CONF, family);
+//    final WALFactory wals = new WALFactory(CONF, null, method);
+//    try {
+//      Path regiondir = region.getRegionStorage().getRegionDir();
+//      FileSystem fs = region.getRegionStorage().getFileSystem();
+//      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+//
+//      long maxSeqId = 3;
+//      long minSeqId = 0;
+//
+//      for (long i = minSeqId; i < maxSeqId; i++) {
+//        Put put = new Put(Bytes.toBytes(i));
+//        put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
+//        region.put(put);
+//        region.flush(true);
+//      }
+//
+//      // this will create a region with 3 files
+//      assertEquals(3, region.getStore(family).getStorefilesCount());
+//      List<Path> storeFiles = new ArrayList<Path>(3);
+//      for (StoreFile sf : region.getStore(family).getStorefiles()) {
+//        storeFiles.add(sf.getPath());
+//      }
+//
+//      // disable compaction completion
+//      CONF.setBoolean("hbase.hstore.compaction.complete", false);
+//      region.compactStores();
+//
+//      // ensure that nothing changed
+//      assertEquals(3, region.getStore(family).getStorefilesCount());
+//
+//      // now find the compacted file, and manually add it to the recovered edits
+//      Path tmpDir = region.getRegionStorage().getTempDir();
+//      FileStatus[] files = FSUtils.listStatus(fs, tmpDir);
+//      String errorMsg = "Expected to find 1 file in the region temp directory "
+//          + "from the compaction, could not find any";
+//      assertNotNull(errorMsg, files);
+//      assertEquals(errorMsg, 1, files.length);
+//      // move the file inside region dir
+//      Path newFile = region.getRegionStorage().commitStoreFile(Bytes.toString(family),
+//          files[0].getPath());
+//
+//      byte[] encodedNameAsBytes = this.region.getRegionInfo().getEncodedNameAsBytes();
+//      byte[] fakeEncodedNameAsBytes = new byte [encodedNameAsBytes.length];
+//      for (int i=0; i < encodedNameAsBytes.length; i++) {
+//        // Mix the byte array to have a new encodedName
+//        fakeEncodedNameAsBytes[i] = (byte) (encodedNameAsBytes[i] + 1);
+//      }
+//
+//      CompactionDescriptor compactionDescriptor = ServerProtobufUtil.toCompactionDescriptor(this.region
+//        .getRegionInfo(), mismatchedRegionName ? fakeEncodedNameAsBytes : null, family,
+//            storeFiles, Lists.newArrayList(newFile),
+//            region.getRegionStorage().getStoreDir(Bytes.toString(family)));
+//
+//      WALUtil.writeCompactionMarker(region.getWAL(), this.region.getReplicationScope(),
+//          this.region.getRegionInfo(), compactionDescriptor, region.getMVCC());
+//
+//      Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
+//
+//      Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
+//      fs.create(recoveredEdits);
+//      WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
+//
+//      long time = System.nanoTime();
+//
+//      writer.append(new WAL.Entry(new HLogKey(regionName, tableName, 10, time,
+//          HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(region.getRegionInfo(),
+//          compactionDescriptor)));
+//      writer.close();
+//
+//      // close the region now, and reopen again
+//      region.getTableDesc();
+//      region.getRegionInfo();
+//      region.close();
+//      try {
+//        region = HRegion.openHRegion(region, null);
+//      } catch (WrongRegionException wre) {
+//        fail("Matching encoded region name should not have produced WrongRegionException");
+//      }
+//
+//      // now check whether we have only one store file, the compacted one
+//      Collection<StoreFile> sfs = region.getStore(family).getStorefiles();
+//      for (StoreFile sf : sfs) {
+//        LOG.info(sf.getPath());
+//      }
+//      if (!mismatchedRegionName) {
+//        assertEquals(1, region.getStore(family).getStorefilesCount());
+//      }
+//      files = FSUtils.listStatus(fs, tmpDir);
+//      assertTrue("Expected to find 0 files inside " + tmpDir, files == null || files.length == 0);
+//
+//      for (long i = minSeqId; i < maxSeqId; i++) {
+//        Get get = new Get(Bytes.toBytes(i));
+//        Result result = region.get(get);
+//        byte[] value = result.getValue(family, Bytes.toBytes(i));
+//        assertArrayEquals(Bytes.toBytes(i), value);
+//      }
+//    } finally {
+//      HBaseTestingUtility.closeRegionAndWAL(this.region);
+//      this.region = null;
+//      wals.close();
+//    }
+//  }
+
+//  @Test
+//  public void testFlushMarkers() throws Exception {
+//    // tests that flush markers are written to WAL and handled at recovered edits
+//    String method = name.getMethodName();
+//    TableName tableName = TableName.valueOf(method);
+//    byte[] family = Bytes.toBytes("family");
+//    Path logDir = TEST_UTIL.getDataTestDirOnTestFS(method + ".log");
+//    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
+//    FSUtils.setRootDir(walConf, logDir);
+//    final WALFactory wals = new WALFactory(walConf, null, method);
+//    final WAL wal = wals.getWAL(tableName.getName(), tableName.getNamespace());
+//
+//    this.region = initHRegion(tableName, HConstants.EMPTY_START_ROW,
+//      HConstants.EMPTY_END_ROW, method, CONF, false, Durability.USE_DEFAULT, wal, family);
+//    try {
+//      Path regiondir = region.getRegionStorage().getRegionDir();
+//      FileSystem fs = region.getRegionStorage().getFileSystem();
+//      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
+//
+//      long maxSeqId = 3;
+//      long minSeqId = 0;
+//
+//      for (long i = minSeqId; i < maxSeqId; i++) {
+//        Put put = new Put(Bytes.toBytes(i));
+//        put.addColumn(family, Bytes.toBytes(i), Bytes.toBytes(i));
+//        region.put(put);
+//        region.flush(true);
+//      }
+//
+//      // this will create a region with 3 files from flush
+//      assertEquals(3, region.getStore(family).getStorefilesCount());
+//      List<String> storeFiles = new ArrayList<String>(3);
+//      for (StoreFile sf : region.getStore(family).getStorefiles()) {
+//        storeFiles.add(sf.getPath().getName());
+//      }
+//
+//      // now verify that the flush markers are written
+//      wal.shutdown();
+//      WAL.Reader reader = WALFactory.createReader(fs, AbstractFSWALProvider.getCurrentFileName(wal),
+//        TEST_UTIL.getConfiguration());
+//      try {
+//        List<WAL.Entry> flushDescriptors = new ArrayList<WAL.Entry>();
+//        long lastFlushSeqId = -1;
+//        while (true) {
+//          WAL.Entry entry = reader.next();
+//          if (entry == null) {
+//            break;
+//          }
+//          Cell cell = entry.getEdit().getCells().get(0);
+//          if (WALEdit.isMetaEditFamily(cell)) {
+//            FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
+//            assertNotNull(flushDesc);
+//            assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
+//            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
+//              assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
+//            } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
+//              assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
+//            }
+//            lastFlushSeqId = flushDesc.getFlushSequenceNumber();
+//            assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
+//            assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store
+//            StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
+//            assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
+//            assertEquals("family", storeFlushDesc.getStoreHomeDir());
+//            if (flushDesc.getAction() == FlushAction.START_FLUSH) {
+//              assertEquals(0, storeFlushDesc.getFlushOutputCount());
+//            } else {
+//              assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush
+//              assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
+//            }
+//
+//            flushDescriptors.add(entry);
+//          }
+//        }
+//
+//        assertEquals(3 * 2, flushDescriptors.size()); // START_FLUSH and COMMIT_FLUSH per flush
+//
+//        // now write those markers to the recovered edits again.
+//
+//        Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
+//
+//        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", 1000));
+//        fs.create(recoveredEdits);
+//        WALProvider.Writer writer = wals.createRecoveredEditsWriter(fs, recoveredEdits);
+//
+//        for (WAL.Entry entry : flushDescriptors) {
+//          writer.append(entry);
+//        }
+//        writer.close();
+//      } finally {
+//        if (null != reader) {
+//          try {
+//            reader.close();
+//          } catch (IOException exception) {
+//            LOG.warn("Problem closing wal: " + exception.getMessage());
+//            LOG.debug("exception details", exception);
+//          }
+//        }
+//      }
+//
+//
+//      // close the region now, and reopen again
+//      region.close();
+//      region = HRegion.openHRegion(region, null);
+//
+//      // now check whether we have can read back the data from region
+//      for (long i = minSeqId; i < maxSeqId; i++) {
+//        Get get = new Get(Bytes.toBytes(i));
+//        Result result = region.get(get);
+//        byte[] value = result.getValue(family, Bytes.toBytes(i));
+//        assertArrayEquals(Bytes.toBytes(i), value);
+//      }
+//    } finally {
+//      HBaseTestingUtility.closeRegionAndWAL(this.region);
+//      this.region = null;
+//      wals.close();
+//    }
+//  }
 
   class IsFlushWALMarker extends ArgumentMatcher<WALEdit> {
     volatile FlushAction[] actions;
@@ -2615,55 +2615,55 @@ public class TestHRegion {
     }
   }
 
-  // ////////////////////////////////////////////////////////////////////////////
-  // Merge test
-  // ////////////////////////////////////////////////////////////////////////////
-  @Test
-  public void testMerge() throws IOException {
-    byte[][] families = { fam1, fam2, fam3 };
-    Configuration hc = initSplit();
-    // Setting up region
-    String method = this.getName();
-    this.region = initHRegion(tableName, method, hc, families);
-    try {
-      LOG.info("" + HBaseTestCase.addContent(region, fam3));
-      region.flush(true);
-      region.compactStores();
-      byte[] splitRow = region.checkSplit();
-      assertNotNull(splitRow);
-      LOG.info("SplitRow: " + Bytes.toString(splitRow));
-      HRegion[] subregions = splitRegion(region, splitRow);
-      try {
-        // Need to open the regions.
-        for (int i = 0; i < subregions.length; i++) {
-          HRegion.openHRegion(subregions[i], null);
-          subregions[i].compactStores();
-        }
-        Path oldRegionPath = region.getRegionStorage().getRegionDir();
-        Path oldRegion1 = subregions[0].getRegionStorage().getRegionDir();
-        Path oldRegion2 = subregions[1].getRegionStorage().getRegionDir();
-        long startTime = System.currentTimeMillis();
-        region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
-        LOG.info("Merge regions elapsed time: "
-            + ((System.currentTimeMillis() - startTime) / 1000.0));
-        FILESYSTEM.delete(oldRegion1, true);
-        FILESYSTEM.delete(oldRegion2, true);
-        FILESYSTEM.delete(oldRegionPath, true);
-        LOG.info("splitAndMerge completed.");
-      } finally {
-        for (int i = 0; i < subregions.length; i++) {
-          try {
-            HBaseTestingUtility.closeRegionAndWAL(subregions[i]);
-          } catch (IOException e) {
-            // Ignore.
-          }
-        }
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-    }
-  }
+//  // ////////////////////////////////////////////////////////////////////////////
+//  // Merge test
+//  // ////////////////////////////////////////////////////////////////////////////
+//  @Test
+//  public void testMerge() throws IOException {
+//    byte[][] families = { fam1, fam2, fam3 };
+//    Configuration hc = initSplit();
+//    // Setting up region
+//    String method = this.getName();
+//    this.region = initHRegion(tableName, method, hc, families);
+//    try {
+//      LOG.info("" + HBaseTestCase.addContent(region, fam3));
+//      region.flush(true);
+//      region.compactStores();
+//      byte[] splitRow = region.checkSplit();
+//      assertNotNull(splitRow);
+//      LOG.info("SplitRow: " + Bytes.toString(splitRow));
+//      HRegion[] subregions = splitRegion(region, splitRow);
+//      try {
+//        // Need to open the regions.
+//        for (int i = 0; i < subregions.length; i++) {
+//          HRegion.openHRegion(subregions[i], null);
+//          subregions[i].compactStores();
+//        }
+//        Path oldRegionPath = region.getRegionStorage().getRegionDir();
+//        Path oldRegion1 = subregions[0].getRegionStorage().getRegionDir();
+//        Path oldRegion2 = subregions[1].getRegionStorage().getRegionDir();
+//        long startTime = System.currentTimeMillis();
+//        region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
+//        LOG.info("Merge regions elapsed time: "
+//            + ((System.currentTimeMillis() - startTime) / 1000.0));
+//        FILESYSTEM.delete(oldRegion1, true);
+//        FILESYSTEM.delete(oldRegion2, true);
+//        FILESYSTEM.delete(oldRegionPath, true);
+//        LOG.info("splitAndMerge completed.");
+//      } finally {
+//        for (int i = 0; i < subregions.length; i++) {
+//          try {
+//            HBaseTestingUtility.closeRegionAndWAL(subregions[i]);
+//          } catch (IOException e) {
+//            // Ignore.
+//          }
+//        }
+//      }
+//    } finally {
+//      HBaseTestingUtility.closeRegionAndWAL(this.region);
+//      this.region = null;
+//    }
+//  }
 
   /**
    * @param parent
@@ -4456,91 +4456,91 @@ public class TestHRegion {
     }
   }
 
-  /**
-   * Testcase to check state of region initialization task set to ABORTED or not
-   * if any exceptions during initialization
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
-    TableName tableName = TableName.valueOf(name.getMethodName());
-    HRegionInfo info = null;
-    try {
-      FileSystem fs = Mockito.mock(FileSystem.class);
-      Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException());
-      HTableDescriptor htd = new HTableDescriptor(tableName);
-      htd.addFamily(new HColumnDescriptor("cf"));
-      info = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
-          HConstants.EMPTY_BYTE_ARRAY, false);
-      Path path = new Path(dir + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization");
-      region = HRegion.newHRegion(CONF, fs, path, htd, info, null, null);
-      // region initialization throws IOException and set task state to ABORTED.
-      region.initialize();
-      fail("Region initialization should fail due to IOException");
-    } catch (IOException io) {
-      List<MonitoredTask> tasks = TaskMonitor.get().getTasks();
-      for (MonitoredTask monitoredTask : tasks) {
-        if (!(monitoredTask instanceof MonitoredRPCHandler)
-            && monitoredTask.getDescription().contains(region.toString())) {
-          assertTrue("Region state should be ABORTED.",
-              monitoredTask.getState().equals(MonitoredTask.State.ABORTED));
-          break;
-        }
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(region);
-    }
-  }
-
-  /**
-   * Verifies that the .regioninfo file is written on region creation and that
-   * is recreated if missing during region opening.
-   */
-  @Test
-  public void testRegionInfoFileCreation() throws IOException {
-    Path rootDir = new Path(dir + "testRegionInfoFileCreation");
-
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testtb"));
-    htd.addFamily(new HColumnDescriptor("cf"));
-
-    HRegionInfo hri = new HRegionInfo(htd.getTableName());
-
-    // Create a region and skip the initialization (like CreateTableHandler)
-    HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootDir, CONF, htd, false);
-    Path regionDir = region.getRegionStorage().getRegionDir();
-    FileSystem fs = region.getRegionStorage().getFileSystem();
-    HBaseTestingUtility.closeRegionAndWAL(region);
-
-    Path regionInfoFile = LegacyLayout.getRegionInfoFile(regionDir);
-
-    // Verify that the .regioninfo file is present
-    assertTrue(LegacyLayout.REGION_INFO_FILE + " should be present in the region dir",
-        fs.exists(regionInfoFile));
-
-    // Try to open the region
-    region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
-    assertEquals(regionDir, region.getRegionStorage().getRegionDir());
-    HBaseTestingUtility.closeRegionAndWAL(region);
-
-    // Verify that the .regioninfo file is still there
-    assertTrue(LegacyLayout.REGION_INFO_FILE + " should be present in the region dir",
-        fs.exists(regionInfoFile));
-
-    // Remove the .regioninfo file and verify is recreated on region open
-    fs.delete(regionInfoFile, true);
-    assertFalse(LegacyLayout.REGION_INFO_FILE + " should be removed from the region dir",
-        fs.exists(regionInfoFile));
-
-    region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
-//    region = TEST_UTIL.openHRegion(hri, htd);
-    assertEquals(regionDir, region.getRegionStorage().getRegionDir());
-    HBaseTestingUtility.closeRegionAndWAL(region);
-
-    // Verify that the .regioninfo file is still there
-    assertTrue(LegacyLayout.REGION_INFO_FILE + " should be present in the region dir",
-        fs.exists(new Path(regionDir, LegacyLayout.REGION_INFO_FILE)));
-  }
+//  /**
+//   * Testcase to check state of region initialization task set to ABORTED or not
+//   * if any exceptions during initialization
+//   *
+//   * @throws Exception
+//   */
+//  @Test
+//  public void testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization() throws Exception {
+//    TableName tableName = TableName.valueOf(name.getMethodName());
+//    HRegionInfo info = null;
+//    try {
+//      FileSystem fs = Mockito.mock(FileSystem.class);
+//      Mockito.when(fs.exists((Path) Mockito.anyObject())).thenThrow(new IOException());
+//      HTableDescriptor htd = new HTableDescriptor(tableName);
+//      htd.addFamily(new HColumnDescriptor("cf"));
+//      info = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY,
+//          HConstants.EMPTY_BYTE_ARRAY, false);
+//      Path path = new Path(dir + "testStatusSettingToAbortIfAnyExceptionDuringRegionInitilization");
+//      region = HRegion.newHRegion(CONF, fs, path, htd, info, null, null);
+//      // region initialization throws IOException and set task state to ABORTED.
+//      region.initialize();
+//      fail("Region initialization should fail due to IOException");
+//    } catch (IOException io) {
+//      List<MonitoredTask> tasks = TaskMonitor.get().getTasks();
+//      for (MonitoredTask monitoredTask : tasks) {
+//        if (!(monitoredTask instanceof MonitoredRPCHandler)
+//            && monitoredTask.getDescription().contains(region.toString())) {
+//          assertTrue("Region state should be ABORTED.",
+//              monitoredTask.getState().equals(MonitoredTask.State.ABORTED));
+//          break;
+//        }
+//      }
+//    } finally {
+//      HBaseTestingUtility.closeRegionAndWAL(region);
+//    }
+//  }
+
+//  /**
+//   * Verifies that the .regioninfo file is written on region creation and that
+//   * is recreated if missing during region opening.
+//   */
+//  @Test
+//  public void testRegionInfoFileCreation() throws IOException {
+//    Path rootDir = new Path(dir + "testRegionInfoFileCreation");
+//
+//    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testtb"));
+//    htd.addFamily(new HColumnDescriptor("cf"));
+//
+//    HRegionInfo hri = new HRegionInfo(htd.getTableName());
+//
+//    // Create a region and skip the initialization (like CreateTableHandler)
+//    HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootDir, CONF, htd, false);
+//    Path regionDir = region.getRegionStorage().getRegionDir();
+//    FileSystem fs = region.getRegionStorage().getFileSystem();
+//    HBaseTestingUtility.closeRegionAndWAL(region);
+//
+//    Path regionInfoFile = LegacyLayout.getRegionInfoFile(regionDir);
+//
+//    // Verify that the .regioninfo file is present
+//    assertTrue(LegacyLayout.REGION_INFO_FILE + " should be present in the region dir",
+//        fs.exists(regionInfoFile));
+//
+//    // Try to open the region
+//    region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
+//    assertEquals(regionDir, region.getRegionStorage().getRegionDir());
+//    HBaseTestingUtility.closeRegionAndWAL(region);
+//
+//    // Verify that the .regioninfo file is still there
+//    assertTrue(LegacyLayout.REGION_INFO_FILE + " should be present in the region dir",
+//        fs.exists(regionInfoFile));
+//
+//    // Remove the .regioninfo file and verify is recreated on region open
+//    fs.delete(regionInfoFile, true);
+//    assertFalse(LegacyLayout.REGION_INFO_FILE + " should be removed from the region dir",
+//        fs.exists(regionInfoFile));
+//
+//    region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
+////    region = TEST_UTIL.openHRegion(hri, htd);
+//    assertEquals(regionDir, region.getRegionStorage().getRegionDir());
+//    HBaseTestingUtility.closeRegionAndWAL(region);
+//
+//    // Verify that the .regioninfo file is still there
+//    assertTrue(LegacyLayout.REGION_INFO_FILE + " should be present in the region dir",
+//        fs.exists(new Path(regionDir, LegacyLayout.REGION_INFO_FILE)));
+//  }
 
   /**
    * TestCase for increment
@@ -4888,110 +4888,110 @@ public class TestHRegion {
     this.region = null;
   }
 
-  @Test
-  public void testRegionReplicaSecondary() throws IOException {
-    // create a primary region, load some data and flush
-    // create a secondary region, and do a get against that
-    Path rootDir = new Path(dir + "testRegionReplicaSecondary");
-    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
-
-    byte[][] families = new byte[][] {
-        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
-    };
-    byte[] cq = Bytes.toBytes("cq");
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary"));
-    for (byte[] family : families) {
-      htd.addFamily(new HColumnDescriptor(family));
-    }
-
-    long time = System.currentTimeMillis();
-    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      false, time, 0);
-    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      false, time, 1);
-
-    HRegion primaryRegion = null, secondaryRegion = null;
-
-    try {
-      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
-          rootDir, TEST_UTIL.getConfiguration(), htd);
-
-      // load some data
-      putData(primaryRegion, 0, 1000, cq, families);
-
-      // flush region
-      primaryRegion.flush(true);
-
-      // open secondary region
-      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);
-
-      verifyData(secondaryRegion, 0, 1000, cq, families);
-    } finally {
-      if (primaryRegion != null) {
-        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
-      }
-      if (secondaryRegion != null) {
-        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
-      }
-    }
-  }
-
-  @Test
-  public void testRegionReplicaSecondaryIsReadOnly() throws IOException {
-    // create a primary region, load some data and flush
-    // create a secondary region, and do a put against that
-    Path rootDir = new Path(dir + "testRegionReplicaSecondary");
-    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
-
-    byte[][] families = new byte[][] {
-        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
-    };
-    byte[] cq = Bytes.toBytes("cq");
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary"));
-    for (byte[] family : families) {
-      htd.addFamily(new HColumnDescriptor(family));
-    }
-
-    long time = System.currentTimeMillis();
-    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      false, time, 0);
-    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      false, time, 1);
-
-    HRegion primaryRegion = null, secondaryRegion = null;
-
-    try {
-      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
-          rootDir, TEST_UTIL.getConfiguration(), htd);
-
-      // load some data
-      putData(primaryRegion, 0, 1000, cq, families);
-
-      // flush region
-      primaryRegion.flush(true);
-
-      // open secondary region
-      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);
-
-      try {
-        putData(secondaryRegion, 0, 1000, cq, families);
-        fail("Should have thrown exception");
-      } catch (IOException ex) {
-        // expected
-      }
-    } finally {
-      if (primaryRegion != null) {
-        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
-      }
-      if (secondaryRegion != null) {
-        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
-      }
-    }
-  }
+//  @Test
+//  public void testRegionReplicaSecondary() throws IOException {
+//    // create a primary region, load some data and flush
+//    // create a secondary region, and do a get against that
+//    Path rootDir = new Path(dir + "testRegionReplicaSecondary");
+//    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
+//
+//    byte[][] families = new byte[][] {
+//        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
+//    };
+//    byte[] cq = Bytes.toBytes("cq");
+//    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary"));
+//    for (byte[] family : families) {
+//      htd.addFamily(new HColumnDescriptor(family));
+//    }
+//
+//    long time = System.currentTimeMillis();
+//    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
+//      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+//      false, time, 0);
+//    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
+//      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+//      false, time, 1);
+//
+//    HRegion primaryRegion = null, secondaryRegion = null;
+//
+//    try {
+//      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
+//          rootDir, TEST_UTIL.getConfiguration(), htd);
+//
+//      // load some data
+//      putData(primaryRegion, 0, 1000, cq, families);
+//
+//      // flush region
+//      primaryRegion.flush(true);
+//
+//      // open secondary region
+//      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);
+//
+//      verifyData(secondaryRegion, 0, 1000, cq, families);
+//    } finally {
+//      if (primaryRegion != null) {
+//        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
+//      }
+//      if (secondaryRegion != null) {
+//        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
+//      }
+//    }
+//  }
+//
+//  @Test
+//  public void testRegionReplicaSecondaryIsReadOnly() throws IOException {
+//    // create a primary region, load some data and flush
+//    // create a secondary region, and do a put against that
+//    Path rootDir = new Path(dir + "testRegionReplicaSecondary");
+//    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
+//
+//    byte[][] families = new byte[][] {
+//        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
+//    };
+//    byte[] cq = Bytes.toBytes("cq");
+//    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary"));
+//    for (byte[] family : families) {
+//      htd.addFamily(new HColumnDescriptor(family));
+//    }
+//
+//    long time = System.currentTimeMillis();
+//    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
+//      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+//      false, time, 0);
+//    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
+//      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+//      false, time, 1);
+//
+//    HRegion primaryRegion = null, secondaryRegion = null;
+//
+//    try {
+//      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
+//          rootDir, TEST_UTIL.getConfiguration(), htd);
+//
+//      // load some data
+//      putData(primaryRegion, 0, 1000, cq, families);
+//
+//      // flush region
+//      primaryRegion.flush(true);
+//
+//      // open secondary region
+//      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);
+//
+//      try {
+//        putData(secondaryRegion, 0, 1000, cq, families);
+//        fail("Should have thrown exception");
+//      } catch (IOException ex) {
+//        // expected
+//      }
+//    } finally {
+//      if (primaryRegion != null) {
+//        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
+//      }
+//      if (secondaryRegion != null) {
+//        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
+//      }
+//    }
+//  }
 
   static WALFactory createWALFactory(Configuration conf, Path rootDir) throws IOException {
     Configuration confForWAL = new Configuration(conf);
@@ -5001,60 +5001,60 @@ public class TestHRegion {
         "hregion-" + RandomStringUtils.randomNumeric(8));
   }
 
-  @Test
-  public void testCompactionFromPrimary() throws IOException {
-    Path rootDir = new Path(dir + "testRegionReplicaSecondary");
-    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
-
-    byte[][] families = new byte[][] {
-        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
-    };
-    byte[] cq = Bytes.toBytes("cq");
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary"));
-    for (byte[] family : families) {
-      htd.addFamily(new HColumnDescriptor(family));
-    }
-
-    long time = System.currentTimeMillis();
-    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      false, time, 0);
-    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
-      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
-      false, time, 1);
-
-    HRegion primaryRegion = null, secondaryRegion = null;
-
-    try {
-      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
-          rootDir, TEST_UTIL.getConfiguration(), htd);
-
-      // load some data
-      putData(primaryRegion, 0, 1000, cq, families);
-
-      // flush region
-      primaryRegion.flush(true);
-
-      // open secondary region
-      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);
-
-      // move the file of the primary region to the archive, simulating a compaction
-      Collection<StoreFile> storeFiles = primaryRegion.getStore(families[0]).getStorefiles();
-      primaryRegion.getRegionStorage().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
-      Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionStorage()
-          .getStoreFiles(families[0]);
-      Assert.assertTrue(storeFileInfos == null || storeFileInfos.size() == 0);
-
-      verifyData(secondaryRegion, 0, 1000, cq, families);
-    } finally {
-      if (primaryRegion != null) {
-        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
-      }
-      if (secondaryRegion != null) {
-        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
-      }
-    }
-  }
+//  @Test
+//  public void testCompactionFromPrimary() throws IOException {
+//    Path rootDir = new Path(dir + "testRegionReplicaSecondary");
+//    FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);
+//
+//    byte[][] families = new byte[][] {
+//        Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
+//    };
+//    byte[] cq = Bytes.toBytes("cq");
+//    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary"));
+//    for (byte[] family : families) {
+//      htd.addFamily(new HColumnDescriptor(family));
+//    }
+//
+//    long time = System.currentTimeMillis();
+//    HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
+//      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+//      false, time, 0);
+//    HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
+//      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
+//      false, time, 1);
+//
+//    HRegion primaryRegion = null, secondaryRegion = null;
+//
+//    try {
+//      primaryRegion = HBaseTestingUtility.createRegionAndWAL(primaryHri,
+//          rootDir, TEST_UTIL.getConfiguration(), htd);
+//
+//      // load some data
+//      putData(primaryRegion, 0, 1000, cq, families);
+//
+//      // flush region
+//      primaryRegion.flush(true);
+//
+//      // open secondary region
+//      secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);
+//
+//      // move the file of the primary region to the archive, simulating a compaction
+//      Collection<StoreFile> storeFiles = primaryRegion.getStore(families[0]).getStorefiles();
+//      primaryRegion.getRegionStorage().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
+//      Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionStorage()
+//          .getStoreFiles(families[0]);
+//      Assert.assertTrue(storeFileInfos == null || storeFileInfos.size() == 0);
+//
+//      verifyData(secondaryRegion, 0, 1000, cq, families);
+//    } finally {
+//      if (primaryRegion != null) {
+//        HBaseTestingUtility.closeRegionAndWAL(primaryRegion);
+//      }
+//      if (secondaryRegion != null) {
+//        HBaseTestingUtility.closeRegionAndWAL(secondaryRegion);
+//      }
+//    }
+//  }
 
   private void putData(int startRow, int numRows, byte[] qf, byte[]... families) throws
       IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
deleted file mode 100644
index 808029c..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.fs.RegionStorage;
-import org.apache.hadoop.hbase.fs.FSUtilsWithRetries;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.util.Progressable;
-
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({RegionServerTests.class, SmallTests.class})
-public class TestHRegionStorage {
-  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static final Log LOG = LogFactory.getLog(TestHRegionStorage.class);
-
-  @Test
-  public void testOnDiskRegionCreation() throws IOException {
-    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testOnDiskRegionCreation");
-    FileSystem fs = TEST_UTIL.getTestFileSystem();
-    Configuration conf = TEST_UTIL.getConfiguration();
-
-    // Create a Region
-    HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
-    RegionStorage regionFs = RegionStorage.open(conf, fs, rootDir, hri, true);
-
-    // Verify if the region is on disk
-    Path regionDir = regionFs.getRegionDir();
-    assertTrue("The region folder should be created", fs.exists(regionDir));
-
-    // Verify the .regioninfo
-    HRegionInfo hriVerify = RegionStorage.open(conf, regionDir, false).getRegionInfo();
-    assertEquals(hri, hriVerify);
-
-    // Open the region
-    regionFs = RegionStorage.open(conf, fs, rootDir, hri, false);
-    assertEquals(regionDir, regionFs.getRegionDir());
-
-    // Delete the region
-    RegionStorage.destroy(conf, fs, rootDir, hri);
-    assertFalse("The region folder should be removed", fs.exists(regionDir));
-
-    fs.delete(rootDir, true);
-  }
-
-  @Test
-  public void testNonIdempotentOpsWithRetries() throws IOException {
-    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testOnDiskRegionCreation");
-    FileSystem fs = TEST_UTIL.getTestFileSystem();
-    Configuration conf = TEST_UTIL.getConfiguration();
-
-    FSUtilsWithRetries regionFs = new FSUtilsWithRetries(conf, new MockFileSystemForCreate());
-    boolean result = regionFs.createDir(new Path("/foo/bar"));
-    assertTrue("Couldn't create the directory", result);
-
-    regionFs = new FSUtilsWithRetries(conf, new MockFileSystem());
-    result = regionFs.rename(new Path("/foo/bar"), new Path("/foo/bar2"));
-    assertTrue("Couldn't rename the directory", result);
-
-    regionFs = new FSUtilsWithRetries(conf, new MockFileSystem());
-    result = regionFs.deleteDir(new Path("/foo/bar"));
-    assertTrue("Couldn't delete the directory", result);
-    fs.delete(rootDir, true);
-  }
-
-  static class MockFileSystemForCreate extends MockFileSystem {
-    @Override
-    public boolean exists(Path path) {
-      return false;
-    }
-  }
-
-  /**
-   * a mock fs which throws exception for first 3 times, and then process the call (returns the
-   * excepted result).
-   */
-  static class MockFileSystem extends FileSystem {
-    int retryCount;
-    final static int successRetryCount = 3;
-
-    public MockFileSystem() {
-      retryCount = 0;
-    }
-
-    @Override
-    public FSDataOutputStream append(Path arg0, int arg1, Progressable arg2) throws IOException {
-      throw new IOException("");
-    }
-
-    @Override
-    public FSDataOutputStream create(Path arg0, FsPermission arg1, boolean arg2, int arg3,
-        short arg4, long arg5, Progressable arg6) throws IOException {
-      LOG.debug("Create, " + retryCount);
-      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
-      return null;
-    }
-
-    @Override
-    public boolean delete(Path arg0) throws IOException {
-      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
-      return true;
-    }
-
-    @Override
-    public boolean delete(Path arg0, boolean arg1) throws IOException {
-      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
-      return true;
-    }
-
-    @Override
-    public FileStatus getFileStatus(Path arg0) throws IOException {
-      FileStatus fs = new FileStatus();
-      return fs;
-    }
-
-    @Override
-    public boolean exists(Path path) {
-      return true;
-    }
-
-    @Override
-    public URI getUri() {
-      throw new RuntimeException("Something bad happen");
-    }
-
-    @Override
-    public Path getWorkingDirectory() {
-      throw new RuntimeException("Something bad happen");
-    }
-
-    @Override
-    public FileStatus[] listStatus(Path arg0) throws IOException {
-      throw new IOException("Something bad happen");
-    }
-
-    @Override
-    public boolean mkdirs(Path arg0, FsPermission arg1) throws IOException {
-      LOG.debug("mkdirs, " + retryCount);
-      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
-      return true;
-    }
-
-    @Override
-    public FSDataInputStream open(Path arg0, int arg1) throws IOException {
-      throw new IOException("Something bad happen");
-    }
-
-    @Override
-    public boolean rename(Path arg0, Path arg1) throws IOException {
-      LOG.debug("rename, " + retryCount);
-      if (retryCount++ < successRetryCount) throw new IOException("Something bad happen");
-      return true;
-    }
-
-    @Override
-    public void setWorkingDirectory(Path arg0) {
-      throw new RuntimeException("Something bad happen");
-    }
-  }
-
-  @Test
-  public void testTempAndCommit() throws IOException {
-    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit");
-    FileSystem fs = TEST_UTIL.getTestFileSystem();
-    Configuration conf = TEST_UTIL.getConfiguration();
-
-    // Create a Region
-    String familyName = "cf";
-    HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
-    RegionStorage regionFs = RegionStorage.open(conf, fs, rootDir, hri, true);
-
-    // New region, no store files
-    Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
-    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
-
-    // Create a new file in temp (no files in the family)
-    Path buildPath = regionFs.createTempName();
-    fs.createNewFile(buildPath);
-    storeFiles = regionFs.getStoreFiles(familyName);
-    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
-
-    // commit the file
-    Path dstPath = regionFs.commitStoreFile(familyName, buildPath);
-    storeFiles = regionFs.getStoreFiles(familyName);
-    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
-    assertFalse(fs.exists(buildPath));
-
-    fs.delete(rootDir, true);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index a790116..84f458b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -60,37 +60,37 @@ public class TestHRegionInfo {
     assertTrue(hri.equals(pbhri));
   }
 
-  @Test
-  public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
-    HBaseTestingUtility htu = new HBaseTestingUtility();
-    HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
-    Path basedir = htu.getDataTestDir();
-    // Create a region.  That'll write the .regioninfo file.
-    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
-    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(),
-        fsTableDescriptors.get(TableName.META_TABLE_NAME));
-    // Get modtime on the file.
-    long modtime = getModTime(r);
-    HBaseTestingUtility.closeRegionAndWAL(r);
-    Thread.sleep(1001);
-    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
-        null, htu.getConfiguration());
-    // Ensure the file is not written for a second time.
-    long modtime2 = getModTime(r);
-    assertEquals(modtime, modtime2);
-    // Now load the file.
-    HRegionInfo deserializedHri = RegionStorage.open(r.getRegionStorage().getConfiguration(),
-        r.getRegionStorage.getRegionContainer(), false).getRegionInfo();
-    assertTrue(hri.equals(deserializedHri));
-    HBaseTestingUtility.closeRegionAndWAL(r);
-  }
-
-  long getModTime(final HRegion r) throws IOException {
-    FileStatus[] statuses = r.getRegionStorage().getFileSystem().listStatus(
-      LegacyLayout.getRegionInfoFile(r.getRegionStorage().getRegionDir()));
-    assertTrue(statuses != null && statuses.length == 1);
-    return statuses[0].getModificationTime();
-  }
+//  @Test
+//  public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
+//    HBaseTestingUtility htu = new HBaseTestingUtility();
+//    HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
+//    Path basedir = htu.getDataTestDir();
+//    // Create a region.  That'll write the .regioninfo file.
+//    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
+//    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(),
+//        fsTableDescriptors.get(TableName.META_TABLE_NAME));
+//    // Get modtime on the file.
+//    long modtime = getModTime(r);
+//    HBaseTestingUtility.closeRegionAndWAL(r);
+//    Thread.sleep(1001);
+//    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
+//        null, htu.getConfiguration());
+//    // Ensure the file is not written for a second time.
+//    long modtime2 = getModTime(r);
+//    assertEquals(modtime, modtime2);
+//    // Now load the file.
+//    HRegionInfo deserializedHri = RegionStorage.open(r.getRegionStorage().getConfiguration(),
+//        r.getRegionStorage.getRegionContainer(), false).getRegionInfo();
+//    assertTrue(hri.equals(deserializedHri));
+//    HBaseTestingUtility.closeRegionAndWAL(r);
+//  }
+//
+//  long getModTime(final HRegion r) throws IOException {
+//    FileStatus[] statuses = r.getRegionStorage().getFileSystem().listStatus(
+//      LegacyLayout.getRegionInfoFile(r.getRegionStorage().getRegionDir()));
+//    assertTrue(statuses != null && statuses.length == 1);
+//    return statuses[0].getModificationTime();
+//  }
 
   @Test
   public void testCreateHRegionInfoName() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6ef946f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 542fa7a..de561d5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -178,13 +178,13 @@ public class TestHRegionReplayEvents {
       string+"-"+string, 1);
     when(rss.getExecutorService()).thenReturn(es);
 
-    primaryRegion = HRegion.createHRegion(CONF, rootDir, htd, primaryHri, walPrimary);
+//    primaryRegion = HRegion.createHRegion(CONF, rootDir, htd, primaryHri, walPrimary);
     primaryRegion.close();
     List<Region> regions = new ArrayList<Region>();
     regions.add(primaryRegion);
     when(rss.getOnlineRegions()).thenReturn(regions);
 
-    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
     secondaryRegion = HRegion.openHRegion(secondaryHri, htd, null, CONF, rss, null);
 
     reader = null;
@@ -824,7 +824,7 @@ public class TestHRegionReplayEvents {
 
     // close the region and open again.
     primaryRegion.close();
-    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
 
     // now replay the edits and the flush marker
     reader =  createWALReaderForPrimary();
@@ -904,7 +904,7 @@ public class TestHRegionReplayEvents {
 
     // close the region and open again.
     primaryRegion.close();
-    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
 
     // now replay the edits and the flush marker
     reader =  createWALReaderForPrimary();
@@ -983,7 +983,7 @@ public class TestHRegionReplayEvents {
 
     // close the region and open again.
     primaryRegion.close();
-    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
 
     // now replay the edits and the flush marker
     reader =  createWALReaderForPrimary();
@@ -1327,7 +1327,7 @@ public class TestHRegionReplayEvents {
     disableReads(secondaryRegion);
 
     primaryRegion.close();
-    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
 
     reader = createWALReaderForPrimary();
     while (true) {
@@ -1477,7 +1477,7 @@ public class TestHRegionReplayEvents {
 
     // close the region and open again.
     primaryRegion.close();
-    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
+//    primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);
 
     // bulk load a file into primary region
     Random random = new Random();