Posted to commits@hbase.apache.org by bu...@apache.org on 2016/11/03 21:19:33 UTC

[2/5] hbase git commit: HBASE-16957 Moved cleaners from master package to fs.legacy package and removed filesystem/ directory layout references from a few files in master package

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestSnapshotFileCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestSnapshotFileCache.java
new file mode 100644
index 0000000..a92a5bc
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestSnapshotFileCache.java
@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.legacy.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that we correctly reload the cache, filter directories, etc.
+ */
+@Category({MasterTests.class, MediumTests.class})
+public class TestSnapshotFileCache {
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotFileCache.class);
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static FileSystem fs;
+  private static Path rootDir;
+
+  @BeforeClass
+  public static void startCluster() throws Exception {
+    UTIL.startMiniDFSCluster(1);
+    fs = UTIL.getDFSCluster().getFileSystem();
+    rootDir = UTIL.getDefaultRootDirPath();
+  }
+
+  @AfterClass
+  public static void stopCluster() throws Exception {
+    UTIL.shutdownMiniDFSCluster();
+  }
+
+  @After
+  public void cleanupFiles() throws Exception {
+    // cleanup the snapshot directory
+    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
+    fs.delete(snapshotDir, true);
+  }
+
+  @Test(timeout = 10000000)
+  public void testLoadAndDelete() throws IOException {
+    // don't refresh the cache unless we tell it to
+    long period = Long.MAX_VALUE;
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+
+    createAndTestSnapshotV1(cache, "snapshot1a", false, true);
+    createAndTestSnapshotV1(cache, "snapshot1b", true, true);
+
+    createAndTestSnapshotV2(cache, "snapshot2a", false, true);
+    createAndTestSnapshotV2(cache, "snapshot2b", true, true);
+  }
+
+  @Test
+  public void testReloadModifiedDirectory() throws IOException {
+    // don't refresh the cache unless we tell it to
+    long period = Long.MAX_VALUE;
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+
+    createAndTestSnapshotV1(cache, "snapshot1", false, true);
+    // now delete the snapshot and add a file with a different name
+    createAndTestSnapshotV1(cache, "snapshot1", false, false);
+
+    createAndTestSnapshotV2(cache, "snapshot2", false, true);
+    // now delete the snapshot and add a file with a different name
+    createAndTestSnapshotV2(cache, "snapshot2", false, false);
+  }
+
+  @Test
+  public void testSnapshotTempDirReload() throws IOException {
+    long period = Long.MAX_VALUE;
+    // This doesn't refresh cache until we invoke it explicitly
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+
+    // Add a new non-tmp snapshot
+    createAndTestSnapshotV1(cache, "snapshot0v1", false, false);
+    createAndTestSnapshotV1(cache, "snapshot0v2", false, false);
+
+    // Add a new tmp snapshot
+    createAndTestSnapshotV2(cache, "snapshot1", true, false);
+
+    // Add another tmp snapshot
+    createAndTestSnapshotV2(cache, "snapshot2", true, false);
+  }
+
+  @Test
+  public void testWeNeverCacheTmpDirAndLoadIt() throws Exception {
+
+    final AtomicInteger count = new AtomicInteger(0);
+    // don't refresh the cache unless we tell it to
+    long period = Long.MAX_VALUE;
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles()) {
+      @Override
+      List<String> getSnapshotsInProgress(final SnapshotManager snapshotManager)
+              throws IOException {
+        List<String> result = super.getSnapshotsInProgress(snapshotManager);
+        count.incrementAndGet();
+        return result;
+      }
+    };
+
+    SnapshotMock.SnapshotBuilder complete =
+        createAndTestSnapshotV1(cache, "snapshot", false, false);
+
+    SnapshotMock.SnapshotBuilder inProgress =
+        createAndTestSnapshotV1(cache, "snapshotInProgress", true, false);
+
+    int countBeforeCheck = count.get();
+
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    List<FileStatus> allStoreFiles = getStoreFilesForSnapshot(complete);
+    Iterable<FileStatus> deletableFiles = cache.getUnreferencedFiles(allStoreFiles, null);
+    assertTrue(Iterables.isEmpty(deletableFiles));
+    // no need for tmp dir check as all files are accounted for.
+    assertEquals(0, count.get() - countBeforeCheck);
+
+    // add a random file to make sure we refresh
+    FileStatus randomFile = mockStoreFile(UUID.randomUUID().toString());
+    allStoreFiles.add(randomFile);
+    deletableFiles = cache.getUnreferencedFiles(allStoreFiles, null);
+    assertEquals(randomFile, Iterables.getOnlyElement(deletableFiles));
+    assertEquals(1, count.get() - countBeforeCheck); // we check the tmp directory
+  }
+
+  private List<FileStatus> getStoreFilesForSnapshot(SnapshotMock.SnapshotBuilder builder)
+      throws IOException {
+    final List<FileStatus> allStoreFiles = Lists.newArrayList();
+    SnapshotReferenceUtil
+        .visitReferencedFiles(UTIL.getConfiguration(), fs, builder.getSnapshotsDir(),
+            new SnapshotReferenceUtil.SnapshotVisitor() {
+              @Override public void storeFile(HRegionInfo regionInfo, String familyName,
+                  SnapshotProtos.SnapshotRegionManifest.StoreFile storeFile) throws IOException {
+                FileStatus status = mockStoreFile(storeFile.getName());
+                allStoreFiles.add(status);
+              }
+            });
+    return allStoreFiles;
+  }
+
+  private FileStatus mockStoreFile(String storeFileName) {
+    FileStatus status = mock(FileStatus.class);
+    Path path = mock(Path.class);
+    when(path.getName()).thenReturn(storeFileName);
+    when(status.getPath()).thenReturn(path);
+    return status;
+  }
+
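+  /** Inspector handed to the cache: lists every hfile referenced by a snapshot. */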
+  class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector {
+    public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException {
+      Collection<String> files =  new HashSet<String>();
+      files.addAll(SnapshotReferenceUtil.getHFileNames(UTIL.getConfiguration(), fs, snapshotDir));
+      return files;
+    }
+  }
+
+  private SnapshotMock.SnapshotBuilder createAndTestSnapshotV1(final SnapshotFileCache cache,
+      final String name, final boolean tmp, final boolean removeOnExit) throws IOException {
+    SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
+    SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV1(name, name);
+    createAndTestSnapshot(cache, builder, tmp, removeOnExit);
+    return builder;
+  }
+
+  private void createAndTestSnapshotV2(final SnapshotFileCache cache, final String name,
+      final boolean tmp, final boolean removeOnExit) throws IOException {
+    SnapshotMock snapshotMock = new SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
+    SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(name, name);
+    createAndTestSnapshot(cache, builder, tmp, removeOnExit);
+  }
+
+  private void createAndTestSnapshot(final SnapshotFileCache cache,
+      final SnapshotMock.SnapshotBuilder builder,
+      final boolean tmp, final boolean removeOnExit) throws IOException {
+    List<Path> files = new ArrayList<Path>();
+    for (int i = 0; i < 3; ++i) {
+      for (Path filePath: builder.addRegion()) {
+        String fileName = filePath.getName();
+        if (tmp) {
+          // We should be able to find all the files while the snapshot creation is in-progress
+          FSUtils.logFileSystemState(fs, rootDir, LOG);
+          Iterable<FileStatus> nonSnapshot = getNonSnapshotFiles(cache, filePath);
+          assertFalse("Cache didn't find " + fileName, Iterables.contains(nonSnapshot, fileName));
+        }
+        files.add(filePath);
+      }
+    }
+
+    // Finalize the snapshot
+    if (!tmp) {
+      builder.commit();
+    }
+
+    // Make sure that all files are still present
+    for (Path path: files) {
+      Iterable<FileStatus> nonSnapshotFiles = getNonSnapshotFiles(cache, path);
+      assertFalse("Cache didn't find " + path.getName(),
+          Iterables.contains(nonSnapshotFiles, path.getName()));
+    }
+
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+    if (removeOnExit) {
+      LOG.debug("Deleting snapshot.");
+      fs.delete(builder.getSnapshotsDir(), true);
+      FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+      // The files should be in cache until next refresh
+      for (Path filePath: files) {
+        Iterable<FileStatus> nonSnapshotFiles = getNonSnapshotFiles(cache, filePath);
+        assertFalse("Cache didn't find " + filePath.getName(), Iterables.contains(nonSnapshotFiles,
+            filePath.getName()));
+      }
+
+      // then trigger a refresh
+      cache.triggerCacheRefreshForTesting();
+      // and now it shouldn't find those files
+      for (Path filePath: files) {
+        Iterable<FileStatus> nonSnapshotFiles = getNonSnapshotFiles(cache, filePath);
+        assertFalse("Cache found '" + filePath.getName() + "', but it shouldn't have.",
+            containsName(nonSnapshotFiles, filePath.getName()));
+      }
+    }
+  }
+
+  private Iterable<FileStatus> getNonSnapshotFiles(SnapshotFileCache cache, Path storeFile)
+      throws IOException {
+    return cache.getUnreferencedFiles(
+        Arrays.asList(FSUtils.listStatus(fs, storeFile.getParent())), null
+    );
+  }
+
+  /** Whether any of the given file statuses carries the given file name. */
+  private static boolean containsName(Iterable<FileStatus> files, String fileName) {
+    for (FileStatus status: files) {
+      if (status.getPath().getName().equals(fileName)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
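
The cache API exercised above comes down to one constructor and one query. As a
minimal sketch, using only the calls shown in this test (the refresh period and
delay values, the thread name, and the helper's name are illustrative, not
prescribed by the API):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    static Iterable<FileStatus> findUnreferenced(FileSystem fs, Path rootDir,
        List<FileStatus> candidates, SnapshotFileCache.SnapshotFileInspector inspector)
        throws IOException {
      // A period of Long.MAX_VALUE means the cache never refreshes on a timer,
      // only when a lookup misses, mirroring the tests above.
      SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, Long.MAX_VALUE,
          10000000, "snapshot-file-cache-refresh", inspector);
      // Returns the candidates that no completed or in-progress snapshot
      // references; the SnapshotManager argument may be null, as in the tests.
      return cache.getUnreferencedFiles(candidates, null);
    }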

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestSnapshotHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestSnapshotHFileCleaner.java
new file mode 100644
index 0000000..98cd136
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestSnapshotHFileCleaner.java
@@ -0,0 +1,190 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.legacy.snapshot;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that the snapshot hfile cleaner finds hfiles referenced in a snapshot
+ */
+@Category({MasterTests.class, SmallTests.class})
+public class TestSnapshotHFileCleaner {
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotHFileCleaner.class);
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final String TABLE_NAME_STR = "testSnapshotManifest";
+  private static final String SNAPSHOT_NAME_STR = "testSnapshotManifest-snapshot";
+  private static Path rootDir;
+  private static FileSystem fs;
+
+  /**
+   * Setup the test environment
+   */
+  @BeforeClass
+  public static void setup() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    rootDir = FSUtils.getRootDir(conf);
+    fs = FileSystem.get(conf);
+  }
+
+  @AfterClass
+  public static void cleanup() throws IOException {
+    // cleanup
+    fs.delete(rootDir, true);
+  }
+
+  @Test
+  public void testFindsSnapshotFilesWhenCleaning() throws IOException {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
+    Path rootDir = FSUtils.getRootDir(conf);
+    Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+
+    FileSystem fs = FileSystem.get(conf);
+    SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
+    cleaner.setConf(conf);
+
+    // write an hfile to the snapshot directory
+    String snapshotName = "snapshot";
+    TableName tableName = TableName.valueOf("table");
+    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    HRegionInfo mockRegion = new HRegionInfo(tableName);
+    Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
+    Path familyDir = new Path(regionSnapshotDir, "family");
+    // create a reference to a supposedly valid hfile
+    String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
+    Path refFile = new Path(familyDir, hfile);
+
+    // make sure the reference file exists
+    fs.create(refFile).close();
+
+    // create the hfile in the archive
+    fs.mkdirs(archivedHfileDir);
+    fs.createNewFile(new Path(archivedHfileDir, hfile));
+
+    // make sure that the file isn't deletable
+    assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
+  }
+
+  class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector {
+    public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException {
+      Collection<String> files =  new HashSet<String>();
+      files.addAll(SnapshotReferenceUtil.getHFileNames(TEST_UTIL.getConfiguration(), fs, snapshotDir));
+      return files;
+    }
+  }
+
+  /**
+   * If there is a corrupted region manifest, it should throw a
+   * CorruptedSnapshotException instead of a generic IOException.
+   */
+  @Test
+  public void testCorruptedRegionManifest() throws IOException {
+    SnapshotTestingUtils.SnapshotMock snapshotMock =
+        new SnapshotTestingUtils.SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
+    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(
+        SNAPSHOT_NAME_STR, TABLE_NAME_STR);
+    builder.addRegionV2();
+    builder.corruptOneRegionManifest();
+
+    long period = Long.MAX_VALUE;
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+    try {
+      cache.getSnapshotsInProgress(null);
+      fail("Expected a CorruptedSnapshotException from the corrupted region manifest");
+    } catch (CorruptedSnapshotException cse) {
+      LOG.info("Expected exception " + cse);
+    } finally {
+      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir), true);
+    }
+  }
+
+  /**
+   * If there is a corrupted data manifest, it should throw a
+   * CorruptedSnapshotException instead of a generic IOException.
+   */
+  @Test
+  public void testCorruptedDataManifest() throws IOException {
+    SnapshotTestingUtils.SnapshotMock snapshotMock =
+        new SnapshotTestingUtils.SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
+    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(
+        SNAPSHOT_NAME_STR, TABLE_NAME_STR);
+    builder.addRegionV2();
+    // consolidate to generate a data.manifest file
+    builder.consolidate();
+    builder.corruptDataManifest();
+
+    long period = Long.MAX_VALUE;
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+    try {
+      cache.getSnapshotsInProgress(null);
+      fail("Expected a CorruptedSnapshotException from the corrupted data manifest");
+    } catch (CorruptedSnapshotException cse) {
+      LOG.info("Expected exception " + cse);
+    } finally {
+      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir), true);
+    }
+  }
+
+  /**
+   * HBASE-16464: a tmp snapshot that is missing a region snapshot file should be
+   * removed when the cache scans in-progress snapshots.
+   */
+  @Test
+  public void testMissedTmpSnapshot() throws IOException {
+    SnapshotTestingUtils.SnapshotMock snapshotMock =
+        new SnapshotTestingUtils.SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
+    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(
+        SNAPSHOT_NAME_STR, TABLE_NAME_STR);
+    builder.addRegionV2();
+    builder.missOneRegionSnapshotFile();
+
+    long period = Long.MAX_VALUE;
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+    cache.getSnapshotsInProgress(null);
+    assertFalse(fs.exists(builder.getSnapshotsDir()));
+  }
+}
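
For context, the cleaner surface this test drives is just configure-and-query. A
minimal sketch under the same assumptions as the test (the configuration's root
dir holds the snapshot layout; the method name is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    static boolean isStillReferenced(Configuration conf, Path archivedHFile)
        throws IOException {
      SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
      cleaner.setConf(conf); // snapshots are located via the conf's root dir
      FileSystem fs = FileSystem.get(conf);
      // isFileDeletable() returns false while some snapshot still references
      // the archived hfile, as asserted in testFindsSnapshotFilesWhenCleaning.
      return !cleaner.isFileDeletable(fs.getFileStatus(archivedHFile));
    }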

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java
deleted file mode 100644
index 92c7bb6..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestCleanerChore.java
+++ /dev/null
@@ -1,319 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.StoppableImplementation;
-import org.junit.After;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
-
-@Category({MasterTests.class, SmallTests.class})
-public class TestCleanerChore {
-
-  private static final Log LOG = LogFactory.getLog(TestCleanerChore.class);
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  @After
-  public void cleanup() throws Exception {
-    // delete and recreate the test directory, ensuring a clean test dir between tests
-    UTIL.cleanupTestDir();
-}
-
-
-  @Test
-  public void testSavesFilesOnRequest() throws Exception {
-    Stoppable stop = new StoppableImplementation();
-    Configuration conf = UTIL.getConfiguration();
-    Path testDir = UTIL.getDataTestDir();
-    FileSystem fs = UTIL.getTestFileSystem();
-    String confKey = "hbase.test.cleaner.delegates";
-    conf.set(confKey, NeverDelete.class.getName());
-
-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
-
-    // create the directory layout in the directory to clean
-    Path parent = new Path(testDir, "parent");
-    Path file = new Path(parent, "someFile");
-    fs.mkdirs(parent);
-    // touch a new file
-    fs.create(file).close();
-    assertTrue("Test file didn't get created.", fs.exists(file));
-
-    // run the chore
-    chore.chore();
-
-    // verify the file and directory were preserved
-    assertTrue("File got deleted but should have been saved", fs.exists(file));
-    assertTrue("Empty directory got deleted but should have been saved", fs.exists(parent));
-  }
-
-  @Test
-  public void testDeletesEmptyDirectories() throws Exception {
-    Stoppable stop = new StoppableImplementation();
-    Configuration conf = UTIL.getConfiguration();
-    Path testDir = UTIL.getDataTestDir();
-    FileSystem fs = UTIL.getTestFileSystem();
-    String confKey = "hbase.test.cleaner.delegates";
-    conf.set(confKey, AlwaysDelete.class.getName());
-
-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
-
-    // create the directory layout in the directory to clean
-    Path parent = new Path(testDir, "parent");
-    Path child = new Path(parent, "child");
-    Path emptyChild = new Path(parent, "emptyChild");
-    Path file = new Path(child, "someFile");
-    fs.mkdirs(child);
-    fs.mkdirs(emptyChild);
-    // touch a new file
-    fs.create(file).close();
-    // also create a file in the top level directory
-    Path topFile = new Path(testDir, "topFile");
-    fs.create(topFile).close();
-    assertTrue("Test file didn't get created.", fs.exists(file));
-    assertTrue("Test file didn't get created.", fs.exists(topFile));
-
-    // run the chore
-    chore.chore();
-
-    // verify all the files got deleted
-    assertFalse("File didn't get deleted", fs.exists(topFile));
-    assertFalse("File didn't get deleted", fs.exists(file));
-    assertFalse("Empty directory didn't get deleted", fs.exists(child));
-    assertFalse("Empty directory didn't get deleted", fs.exists(parent));
-  }
-
-  /**
-   * Test to make sure that we don't attempt to ask the delegate whether or not we should preserve a
-   * directory.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testDoesNotCheckDirectories() throws Exception {
-    Stoppable stop = new StoppableImplementation();
-    Configuration conf = UTIL.getConfiguration();
-    Path testDir = UTIL.getDataTestDir();
-    FileSystem fs = UTIL.getTestFileSystem();
-    String confKey = "hbase.test.cleaner.delegates";
-    conf.set(confKey, AlwaysDelete.class.getName());
-
-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
-    // spy on the delegate to ensure that we don't check for directories
-    AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
-    AlwaysDelete spy = Mockito.spy(delegate);
-    chore.cleanersChain.set(0, spy);
-
-    // create the directory layout in the directory to clean
-    Path parent = new Path(testDir, "parent");
-    Path file = new Path(parent, "someFile");
-    fs.mkdirs(parent);
-    assertTrue("Test parent didn't get created.", fs.exists(parent));
-    // touch a new file
-    fs.create(file).close();
-    assertTrue("Test file didn't get created.", fs.exists(file));
-    
-    FileStatus fStat = fs.getFileStatus(parent);
-    chore.chore();
-    // make sure we never checked the directory
-    Mockito.verify(spy, Mockito.never()).isFileDeletable(fStat);
-    Mockito.reset(spy);
-  }
-
-  @Test
-  public void testStoppedCleanerDoesNotDeleteFiles() throws Exception {
-    Stoppable stop = new StoppableImplementation();
-    Configuration conf = UTIL.getConfiguration();
-    Path testDir = UTIL.getDataTestDir();
-    FileSystem fs = UTIL.getTestFileSystem();
-    String confKey = "hbase.test.cleaner.delegates";
-    conf.set(confKey, AlwaysDelete.class.getName());
-
-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
-
-    // also create a file in the top level directory
-    Path topFile = new Path(testDir, "topFile");
-    fs.create(topFile).close();
-    assertTrue("Test file didn't get created.", fs.exists(topFile));
-
-    // stop the chore
-    stop.stop("testing stop");
-
-    // run the chore
-    chore.chore();
-
-    // test that the file still exists
-    assertTrue("File got deleted while chore was stopped", fs.exists(topFile));
-  }
-
-  /**
-   * While cleaning a directory, all the files in the directory may be deleted, but there may be
-   * another file added, in which case the directory shouldn't be deleted.
-   * @throws IOException on failure
-   */
-  @Test
-  public void testCleanerDoesNotDeleteDirectoryWithLateAddedFiles() throws IOException {
-    Stoppable stop = new StoppableImplementation();
-    Configuration conf = UTIL.getConfiguration();
-    final Path testDir = UTIL.getDataTestDir();
-    final FileSystem fs = UTIL.getTestFileSystem();
-    String confKey = "hbase.test.cleaner.delegates";
-    conf.set(confKey, AlwaysDelete.class.getName());
-
-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
-    // spy on the delegate to ensure that we don't check for directories
-    AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
-    AlwaysDelete spy = Mockito.spy(delegate);
-    chore.cleanersChain.set(0, spy);
-
-    // create the directory layout in the directory to clean
-    final Path parent = new Path(testDir, "parent");
-    Path file = new Path(parent, "someFile");
-    fs.mkdirs(parent);
-    // touch a new file
-    fs.create(file).close();
-    assertTrue("Test file didn't get created.", fs.exists(file));
-    final Path addedFile = new Path(parent, "addedFile");
-
-    // when we attempt to delete the original file, add another file in the same directory
-    Mockito.doAnswer(new Answer<Boolean>() {
-      @Override
-      public Boolean answer(InvocationOnMock invocation) throws Throwable {
-        fs.create(addedFile).close();
-        FSUtils.logFileSystemState(fs, testDir, LOG);
-        return (Boolean) invocation.callRealMethod();
-      }
-    }).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
-
-    // run the chore
-    chore.chore();
-
-    // make sure all the directories + added file exist, but the original file is deleted
-    assertTrue("Added file unexpectedly deleted", fs.exists(addedFile));
-    assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
-    assertFalse("Original file unexpectedly retained", fs.exists(file));
-    Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
-    Mockito.reset(spy);
-  }
-
-  /**
-   * The cleaner runs in a loop: it first checks whether all the files under a directory can be
-   * deleted and, if they all can, then tries to delete the directory itself. However, a file may
-   * be added to that directory after the original check. This test ensures that we don't
-   * accidentally delete the directory in that case and don't get spurious IOExceptions.
-   * <p>
-   * This was from HBASE-7465.
-   * @throws Exception on failure
-   */
-  @Test
-  public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception {
-    Stoppable stop = new StoppableImplementation();
-    // need to use a local HBaseTestingUtility so we don't break the rest of the tests, which run
-    // on the local FS and get hosed when we start up a minicluster.
-    HBaseTestingUtility localUtil = new HBaseTestingUtility();
-    Configuration conf = localUtil.getConfiguration();
-    final Path testDir = UTIL.getDataTestDir();
-    final FileSystem fs = UTIL.getTestFileSystem();
-    LOG.debug("Writing test data to: " + testDir);
-    String confKey = "hbase.test.cleaner.delegates";
-    conf.set(confKey, AlwaysDelete.class.getName());
-
-    AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
-    // spy on the delegate to ensure that we don't check for directories
-    AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
-    AlwaysDelete spy = Mockito.spy(delegate);
-    chore.cleanersChain.set(0, spy);
-
-    // create the directory layout in the directory to clean
-    final Path parent = new Path(testDir, "parent");
-    Path file = new Path(parent, "someFile");
-    fs.mkdirs(parent);
-    // touch a new file
-    fs.create(file).close();
-    assertTrue("Test file didn't get created.", fs.exists(file));
-    final Path racyFile = new Path(parent, "addedFile");
-
-    // when we attempt to delete the original file, add another file in the same directory
-    Mockito.doAnswer(new Answer<Boolean>() {
-      @Override
-      public Boolean answer(InvocationOnMock invocation) throws Throwable {
-        fs.create(racyFile).close();
-        FSUtils.logFileSystemState(fs, testDir, LOG);
-        return (Boolean) invocation.callRealMethod();
-      }
-    }).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
-
-    // attempt to delete the directory; it should fail because a file is added mid-iteration
-    if (chore.checkAndDeleteDirectory(parent)) {
-      throw new Exception(
-          "Reported success deleting directory, should have failed when adding file mid-iteration");
-    }
-
-    // make sure all the directories + added file exist, but the original file is deleted
-    assertTrue("Added file unexpectedly deleted", fs.exists(racyFile));
-    assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
-    assertFalse("Original file unexpectedly retained", fs.exists(file));
-    Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
-  }
-
-  private static class AllValidPaths extends CleanerChore<BaseHFileCleanerDelegate> {
-
-    public AllValidPaths(String name, Stoppable s, Configuration conf, FileSystem fs,
-        Path oldFileDir, String confkey) {
-      super(name, Integer.MAX_VALUE, s, conf, fs, oldFileDir, confkey);
-    }
-
-    // all paths are valid
-    @Override
-    protected boolean validate(Path file) {
-      return true;
-    }
-  };
-
-  public static class AlwaysDelete extends BaseHFileCleanerDelegate {
-    @Override
-    public boolean isFileDeletable(FileStatus fStat) {
-      return true;
-    }
-  }
-
-  public static class NeverDelete extends BaseHFileCleanerDelegate {
-    @Override
-    public boolean isFileDeletable(FileStatus fStat) {
-      return false;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
deleted file mode 100644
index 6049701..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ /dev/null
@@ -1,263 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.EnvironmentEdge;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({MasterTests.class, MediumTests.class})
-public class TestHFileCleaner {
-  private static final Log LOG = LogFactory.getLog(TestHFileCleaner.class);
-
-  private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    // have to use a minidfs cluster because the localfs doesn't modify file times correctly
-    UTIL.startMiniDFSCluster(1);
-  }
-
-  @AfterClass
-  public static void shutdownCluster() throws IOException {
-    UTIL.shutdownMiniDFSCluster();
-  }
-
-  @Test
-  public void testTTLCleaner() throws IOException, InterruptedException {
-    FileSystem fs = UTIL.getDFSCluster().getFileSystem();
-    Path root = UTIL.getDataTestDirOnTestFS();
-    Path file = new Path(root, "file");
-    fs.createNewFile(file);
-    long createTime = System.currentTimeMillis();
-    assertTrue("Test file not created!", fs.exists(file));
-    TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
-    // update the time info for the file, so the cleaner removes it
-    fs.setTimes(file, createTime - 100, -1);
-    Configuration conf = UTIL.getConfiguration();
-    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
-    cleaner.setConf(conf);
-    assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs)
-        + " with create time:" + createTime, cleaner.isFileDeletable(fs.getFileStatus(file)));
-  }
-
-  /**
-   * @param file to check
-   * @return loggable information about the file
-   */
-  private String getFileStats(Path file, FileSystem fs) throws IOException {
-    FileStatus status = fs.getFileStatus(file);
-    return "File" + file + ", mtime:" + status.getModificationTime() + ", atime:"
-        + status.getAccessTime();
-  }
-
-  @Test(timeout = 60 * 1000)
-  public void testHFileCleaning() throws Exception {
-    final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
-    String prefix = "someHFileThatWouldBeAUUID";
-    Configuration conf = UTIL.getConfiguration();
-    // set TTL
-    long ttl = 2000;
-    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
-      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
-    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
-    Server server = new DummyServer();
-    Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
-    FileSystem fs = FileSystem.get(conf);
-    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
-
-    // Create 1 invalid file, 1 "recent" file and 31 old files (33 total)
-    final long createTime = System.currentTimeMillis();
-    fs.delete(archivedHfileDir, true);
-    fs.mkdirs(archivedHfileDir);
-    // Case 1: 1 invalid file, which should be deleted directly
-    fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
-    // Case 2: 1 "recent" file, not even deletable for the first log cleaner
-    // (TimeToLiveLogCleaner), so we are not going down the chain
-    LOG.debug("Now is: " + createTime);
-    for (int i = 1; i < 32; i++) {
-      // Case 3: old files which would be deletable for the first log cleaner
-      // (TimeToLiveHFileCleaner),
-      Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
-      fs.createNewFile(fileName);
-      // set the creation time past ttl to ensure that it gets removed
-      fs.setTimes(fileName, createTime - ttl - 1, -1);
-      LOG.debug("Creating " + getFileStats(fileName, fs));
-    }
-
-    // Case 2: 1 newer file, not even deletable for the first log cleaner
-    // (TimeToLiveLogCleaner), so we are not going down the chain
-    Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
-    fs.createNewFile(saved);
-    // set creation time within the ttl
-    fs.setTimes(saved, createTime - ttl / 2, -1);
-    LOG.debug("Creating " + getFileStats(saved, fs));
-    for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
-      LOG.debug(stat.getPath().toString());
-    }
-
-    assertEquals(33, fs.listStatus(archivedHfileDir).length);
-
-    // set a custom edge manager to handle time checking
-    EnvironmentEdge setTime = new EnvironmentEdge() {
-      @Override
-      public long currentTime() {
-        return createTime;
-      }
-    };
-    EnvironmentEdgeManager.injectEdge(setTime);
-
-    // run the chore
-    cleaner.chore();
-
-    // ensure we only end up with the saved file
-    assertEquals(1, fs.listStatus(archivedHfileDir).length);
-
-    for (FileStatus file : fs.listStatus(archivedHfileDir)) {
-      LOG.debug("Kept hfiles: " + file.getPath().getName());
-    }
-
-    // reset the edge back to the original edge
-    EnvironmentEdgeManager.injectEdge(originalEdge);
-  }
-
-  @Test
-  public void testRemovesEmptyDirectories() throws Exception {
-    Configuration conf = UTIL.getConfiguration();
-    // no cleaner policies = delete all files
-    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
-    Server server = new DummyServer();
-    Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
-
-    // setup the cleaner
-    FileSystem fs = UTIL.getDFSCluster().getFileSystem();
-    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
-
-    // make all the directories for archiving files
-    Path table = new Path(archivedHfileDir, "table");
-    Path region = new Path(table, "regionsomthing");
-    Path family = new Path(region, "fam");
-    Path file = new Path(family, "file12345");
-    fs.mkdirs(family);
-    if (!fs.exists(family)) throw new RuntimeException("Couldn't create test family:" + family);
-    fs.create(file).close();
-    if (!fs.exists(file)) throw new RuntimeException("Test file didn't get created:" + file);
-
-    // run the chore to cleanup the files (and the directories above it)
-    cleaner.chore();
-
-    // make sure all the parent directories get removed
-    assertFalse("family directory not removed for empty directory", fs.exists(family));
-    assertFalse("region directory not removed for empty directory", fs.exists(region));
-    assertFalse("table directory not removed for empty directory", fs.exists(table));
-    assertTrue("archive directory", fs.exists(archivedHfileDir));
-  }
-
-  static class DummyServer implements Server {
-
-    @Override
-    public Configuration getConfiguration() {
-      return UTIL.getConfiguration();
-    }
-
-    @Override
-    public ZooKeeperWatcher getZooKeeper() {
-      try {
-        return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
-      } catch (IOException e) {
-        e.printStackTrace();
-      }
-      return null;
-    }
-
-    @Override
-    public CoordinatedStateManager getCoordinatedStateManager() {
-      return null;
-    }
-
-    @Override
-    public ClusterConnection getConnection() {
-      return null;
-    }
-
-    @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
-    public ServerName getServerName() {
-      return ServerName.valueOf("regionserver,60020,000000");
-    }
-
-    @Override
-    public void abort(String why, Throwable e) {
-    }
-
-    @Override
-    public boolean isAborted() {
-      return false;
-    }
-
-    @Override
-    public void stop(String why) {
-    }
-
-    @Override
-    public boolean isStopped() {
-      return false;
-    }
-
-    @Override
-    public ChoreService getChoreService() {
-      return null;
-    }
-
-    @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
deleted file mode 100644
index 0401ae8..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ /dev/null
@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestRule;
-
-/**
- * Test the HFileLink Cleaner.
- * HFiles with links cannot be deleted while a link is present.
- */
-@Category({MasterTests.class, MediumTests.class})
-public class TestHFileLinkCleaner {
-  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-      withLookingForStuckThread(true).build();
-
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  @Test
-  public void testHFileLinkCleaning() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
-    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
-    Path rootDir = FSUtils.getRootDir(conf);
-    FileSystem fs = FileSystem.get(conf);
-
-    final TableName tableName = TableName.valueOf("test-table");
-    final TableName tableLinkName = TableName.valueOf("test-link");
-    final String hfileName = "1234567890";
-    final String familyName = "cf";
-
-    HRegionInfo hri = new HRegionInfo(tableName);
-    HRegionInfo hriLink = new HRegionInfo(tableLinkName);
-
-    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
-    Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
-          tableName, hri.getEncodedName(), familyName);
-    Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
-          tableLinkName, hriLink.getEncodedName(), familyName);
-
-    // Create an hfile in the archive: <archive>/<table>/<region>/<family>/<hfileName>
-    Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
-    fs.mkdirs(familyPath);
-    Path hfilePath = new Path(familyPath, hfileName);
-    fs.createNewFile(hfilePath);
-
-    // Create link to hfile
-    Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
-                                        hriLink.getEncodedName(), familyName);
-    fs.mkdirs(familyLinkPath);
-    HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
-    Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
-    assertTrue(fs.exists(linkBackRefDir));
-    FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
-    assertEquals(1, backRefs.length);
-    Path linkBackRef = backRefs[0].getPath();
-
-    // Initialize cleaner
-    final long ttl = 1000;
-    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
-    Server server = new DummyServer();
-    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
-
-    // Link backref cannot be removed
-    cleaner.chore();
-    assertTrue(fs.exists(linkBackRef));
-    assertTrue(fs.exists(hfilePath));
-
-    // Link backref can be removed
-    fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
-        FSUtils.getTableDir(archiveDir, tableLinkName));
-    cleaner.chore();
-    assertFalse("Link should be deleted", fs.exists(linkBackRef));
-
-    // HFile can be removed
-    Thread.sleep(ttl * 2);
-    cleaner.chore();
-    assertFalse("HFile should be deleted", fs.exists(hfilePath));
-
-    // Remove everything
-    for (int i = 0; i < 4; ++i) {
-      Thread.sleep(ttl * 2);
-      cleaner.chore();
-    }
-    assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
-    assertFalse("Link should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
-  }
-
-  private static Path getFamilyDirPath (final Path rootDir, final TableName table,
-    final String region, final String family) {
-    return new Path(new Path(FSUtils.getTableDir(rootDir, table), region), family);
-  }
-
-  static class DummyServer implements Server {
-
-    @Override
-    public Configuration getConfiguration() {
-      return TEST_UTIL.getConfiguration();
-    }
-
-    @Override
-    public ZooKeeperWatcher getZooKeeper() {
-      try {
-        return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
-      } catch (IOException e) {
-        e.printStackTrace();
-      }
-      return null;
-    }
-
-    @Override
-    public CoordinatedStateManager getCoordinatedStateManager() {
-      return null;
-    }
-
-    @Override
-    public ClusterConnection getConnection() {
-      return null;
-    }
-
-    @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
-    public ServerName getServerName() {
-      return ServerName.valueOf("regionserver,60020,000000");
-    }
-
-    @Override
-    public void abort(String why, Throwable e) {}
-
-    @Override
-    public boolean isAborted() {
-      return false;
-    }
-
-    @Override
-    public void stop(String why) {}
-
-    @Override
-    public boolean isStopped() {
-      return false;
-    }
-
-    @Override
-    public ChoreService getChoreService() {
-      return null;
-    }
-
-    @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
deleted file mode 100644
index b6b5492..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ /dev/null
@@ -1,309 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.spy;
-
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.net.URLEncoder;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-
-import com.google.common.collect.Lists;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientZKImpl;
-import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
-import org.apache.hadoop.hbase.replication.regionserver.Replication;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-@Category({MasterTests.class, MediumTests.class})
-public class TestLogsCleaner {
-
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniZKCluster();
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniZKCluster();
-  }
-
-  @Test
-  public void testLogCleaning() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    // set TTL
-    long ttl = 10000;
-    conf.setLong("hbase.master.logcleaner.ttl", ttl);
-    Replication.decorateMasterConfiguration(conf);
-    Server server = new DummyServer();
-    ReplicationQueues repQueues =
-        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper()));
-    repQueues.init(server.getServerName().toString());
-    final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
-        HConstants.HREGION_OLDLOGDIR_NAME);
-    String fakeMachineName =
-      URLEncoder.encode(server.getServerName().toString(), "UTF8");
-
-    final FileSystem fs = FileSystem.get(conf);
-
-    // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
-    long now = System.currentTimeMillis();
-    fs.delete(oldLogDir, true);
-    fs.mkdirs(oldLogDir);
-    // Case 1: 2 invalid files, which would be deleted directly
-    fs.createNewFile(new Path(oldLogDir, "a"));
-    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
-    // Case 2: 1 "recent" file, not even deletable for the first log cleaner
-    // (TimeToLiveLogCleaner), so we are not going down the chain
-    System.out.println("Now is: " + now);
-    for (int i = 1; i < 31; i++) {
-      // Case 3: old files which would be deletable for the first log cleaner
-      // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
-      Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i));
-      fs.createNewFile(fileName);
-      // Case 4: put 3 old log files in ZK indicating that they are scheduled
-      // for replication so these files would pass the first log cleaner
-      // (TimeToLiveLogCleaner) but would be rejected by the second
-      // (ReplicationLogCleaner)
-      if (i % (30/3) == 1) {
-        repQueues.addLog(fakeMachineName, fileName.getName());
-        System.out.println("Replication log file: " + fileName);
-      }
-    }
-
-    // sleep past the TTL so the next files get newer modification times
-    Thread.sleep(ttl);
-    // Case 2: 1 "recent" file, not deletable for the first log cleaner
-    // (TimeToLiveLogCleaner), so we do not go down the chain for it
-    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));
-
-    // Case 2 continued: 1 very new file, likewise not deletable for the
-    // first log cleaner (TimeToLiveLogCleaner)
-    fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000)));
-
-    for (FileStatus stat : fs.listStatus(oldLogDir)) {
-      System.out.println(stat.getPath().toString());
-    }
-
-    assertEquals(34, fs.listStatus(oldLogDir).length);
-
-    LogCleaner cleaner  = new LogCleaner(1000, server, conf, fs, oldLogDir);
-
-    cleaner.chore();
-
-    // We end up with the "recent" file, the very new one and the 3 old log
-    // files which are scheduled for replication
-    TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() throws Exception {
-        return 5 == fs.listStatus(oldLogDir).length;
-      }
-    });
-
-    for (FileStatus file : fs.listStatus(oldLogDir)) {
-      System.out.println("Kept log files: " + file.getPath().getName());
-    }
-  }
-
-  @Test(timeout=5000)
-  public void testZnodeCversionChange() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
-    cleaner.setConf(conf);
-
-    ReplicationQueuesClientZKImpl rqcMock = Mockito.mock(ReplicationQueuesClientZKImpl.class);
-    Mockito.when(rqcMock.getQueuesZNodeCversion()).thenReturn(1, 2, 3, 4);
-
-    Field rqc = ReplicationLogCleaner.class.getDeclaredField("replicationQueues");
-    rqc.setAccessible(true);
-
-    rqc.set(cleaner, rqcMock);
-
-    // This should return eventually when cversion stabilizes
-    cleaner.getDeletableFiles(new LinkedList<FileStatus>());
-  }
-
-  /**
-   * ReplicationLogCleaner should be able to ride over ZooKeeper errors without
-   * aborting.
-   */
-  @Test
-  public void testZooKeeperAbort() throws Exception {
-    Configuration conf = TEST_UTIL.getConfiguration();
-    ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
-
-    List<FileStatus> dummyFiles = Lists.newArrayList(
-        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
-        new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2"))
-    );
-
-    FaultyZooKeeperWatcher faultyZK =
-        new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
-    try {
-      faultyZK.init();
-      cleaner.setConf(conf, faultyZK);
-      // should keep all files due to a ConnectionLossException getting the queues znodes
-      Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
-      assertFalse(toDelete.iterator().hasNext());
-      assertFalse(cleaner.isStopped());
-    } finally {
-      faultyZK.close();
-    }
-
-    // when zk is working both files should be returned
-    cleaner = new ReplicationLogCleaner();
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null);
-    try {
-      cleaner.setConf(conf, zkw);
-      Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
-      Iterator<FileStatus> iter = filesToDelete.iterator();
-      assertTrue(iter.hasNext());
-      assertEquals(new Path("log1"), iter.next().getPath());
-      assertTrue(iter.hasNext());
-      assertEquals(new Path("log2"), iter.next().getPath());
-      assertFalse(iter.hasNext());
-    } finally {
-      zkw.close();
-    }
-  }
-
-  static class DummyServer implements Server {
-
-    @Override
-    public Configuration getConfiguration() {
-      return TEST_UTIL.getConfiguration();
-    }
-
-    @Override
-    public ZooKeeperWatcher getZooKeeper() {
-      try {
-        return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
-      } catch (IOException e) {
-        e.printStackTrace();
-      }
-      return null;
-    }
-
-    @Override
-    public CoordinatedStateManager getCoordinatedStateManager() {
-      return null;
-    }
-
-    @Override
-    public ClusterConnection getConnection() {
-      return null;
-    }
-
-    @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
-    public ServerName getServerName() {
-      return ServerName.valueOf("regionserver,60020,000000");
-    }
-
-    @Override
-    public void abort(String why, Throwable e) {}
-
-    @Override
-    public boolean isAborted() {
-      return false;
-    }
-
-    @Override
-    public void stop(String why) {}
-
-    @Override
-    public boolean isStopped() {
-      return false;
-    }
-
-    @Override
-    public ChoreService getChoreService() {
-      return null;
-    }
-
-    @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-  }
-
-  static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {
-    private RecoverableZooKeeper zk;
-
-    public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
-        throws ZooKeeperConnectionException, IOException {
-      super(conf, identifier, abortable);
-    }
-
-    public void init() throws Exception {
-      this.zk = spy(super.getRecoverableZooKeeper());
-      doThrow(new KeeperException.ConnectionLossException())
-          .when(zk).getData("/hbase/replication/rs", null, new Stat());
-    }
-
-    public RecoverableZooKeeper getRecoverableZooKeeper() {
-      return zk;
-    }
-  }
-}
\ No newline at end of file
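
A note on the chain testLogCleaning exercises: LogCleaner.chore() scans the
archived-WAL directory and deletes a file only when every configured delegate
approves it, which is why a file whose TTL has expired still survives if it is
queued for replication. Below is a minimal sketch of a TTL delegate in the
BaseLogCleanerDelegate API this test uses; it is modeled on TimeToLiveLogCleaner,
but the 600000 ms default is illustrative and the import path is the pre-move
package shown in this diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;

    public class TtlLogCleanerSketch extends BaseLogCleanerDelegate {
      private long ttl;

      @Override
      public void setConf(Configuration conf) {
        super.setConf(conf);
        // Same key the test above tunes down to 10 seconds.
        this.ttl = conf.getLong("hbase.master.logcleaner.ttl", 600000);
      }

      @Override
      public boolean isLogDeletable(FileStatus fStat) {
        long age = System.currentTimeMillis() - fStat.getModificationTime();
        // Approve only files older than the TTL; LogCleaner still consults the
        // remaining delegates (e.g. ReplicationLogCleaner) before deleting.
        return age > ttl;
      }
    }

Since approval is conjunctive, the 3 files the test registers in the replication
queues pass this TTL check but are vetoed by ReplicationLogCleaner, giving the
5-file end state the Waiter.Predicate polls for.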

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
deleted file mode 100644
index fc3e516..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ /dev/null
@@ -1,341 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
- * agreements. See the NOTICE file distributed with this work for additional information regarding
- * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License. You may obtain a
- * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
- * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
- * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
- * for the specific language governing permissions and limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.spy;
-
-import com.google.common.collect.Lists;
-
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationFactory;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueues;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
-import org.apache.hadoop.hbase.replication.regionserver.Replication;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.data.Stat;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-@Category({ MasterTests.class, SmallTests.class })
-public class TestReplicationHFileCleaner {
-  private static final Log LOG = LogFactory.getLog(TestReplicationHFileCleaner.class);
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static Server server;
-  private static ReplicationQueues rq;
-  private static ReplicationPeers rp;
-  private static final String peerId = "TestReplicationHFileCleaner";
-  private static Configuration conf = TEST_UTIL.getConfiguration();
-  static FileSystem fs = null;
-  Path root;
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniZKCluster();
-    server = new DummyServer();
-    conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
-    Replication.decorateMasterConfiguration(conf);
-    rp = ReplicationFactory.getReplicationPeers(server.getZooKeeper(), conf, server);
-    rp.init();
-    rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper()));
-    rq.init(server.getServerName().toString());
-    // Keep the shared FileSystem open; every test below uses it, and it is
-    // torn down along with the mini cluster.
-    fs = FileSystem.get(conf);
-  }
-
-  /**
-   * @throws java.lang.Exception
-   */
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniZKCluster();
-  }
-
-  @Before
-  public void setup() throws ReplicationException, IOException {
-    root = TEST_UTIL.getDataTestDirOnTestFS();
-    rp.registerPeer(peerId, new ReplicationPeerConfig().setClusterKey(TEST_UTIL.getClusterKey()));
-    rq.addPeerToHFileRefs(peerId);
-  }
-
-  @After
-  public void cleanup() throws ReplicationException {
-    try {
-      fs.delete(root, true);
-    } catch (IOException e) {
-      LOG.warn("Failed to delete files recursively from path " + root);
-    }
-    rp.unregisterPeer(peerId);
-  }
-
-  @Test
-  public void testIsFileDeletable() throws IOException, ReplicationException {
-    // 1. Create a file
-    Path file = new Path(root, "testIsFileDeletableWithNoHFileRefs");
-    fs.createNewFile(file);
-    // 2. Assert file is successfully created
-    assertTrue("Test file not created!", fs.exists(file));
-    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
-    cleaner.setConf(conf);
-    // 3. Assert that file as is should be deletable
-    assertTrue("Cleaner should allow to delete this file as there is no hfile reference node "
-        + "for it in the queue.",
-      cleaner.isFileDeletable(fs.getFileStatus(file)));
-
-    List<String> files = new ArrayList<String>(1);
-    files.add(file.getName());
-    // 4. Add the file to hfile-refs queue
-    rq.addHFileRefs(peerId, files);
-    // 5. Assert file should not be deletable
-    assertFalse("Cleaner should not allow to delete this file as there is a hfile reference node "
-        + "for it in the queue.",
-      cleaner.isFileDeletable(fs.getFileStatus(file)));
-  }
-
-  @Test
-  public void testGetDeletableFiles() throws Exception {
-    // 1. Create two files and assert that they do not exist
-    Path notDeletablefile = new Path(root, "testGetDeletableFiles_1");
-    fs.createNewFile(notDeletablefile);
-    assertTrue("Test file not created!", fs.exists(notDeletablefile));
-    Path deletablefile = new Path(root, "testGetDeletableFiles_2");
-    fs.createNewFile(deletablefile);
-    assertTrue("Test file not created!", fs.exists(deletablefile));
-
-    List<FileStatus> files = new ArrayList<FileStatus>(2);
-    FileStatus f = new FileStatus();
-    f.setPath(deletablefile);
-    files.add(f);
-    f = new FileStatus();
-    f.setPath(notDeletablefile);
-    files.add(f);
-
-    List<String> hfiles = new ArrayList<>(1);
-    hfiles.add(notDeletablefile.getName());
-    // 2. Add one file to hfile-refs queue
-    rq.addHFileRefs(peerId, hfiles);
-
-    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
-    cleaner.setConf(conf);
-    Iterator<FileStatus> deletableFilesIterator = cleaner.getDeletableFiles(files).iterator();
-    // 3. Assert that only the file without an hfile reference node is returned
-    // as deletable, and that the referenced file is filtered out
-    assertTrue("Expected one deletable file", deletableFilesIterator.hasNext());
-    assertEquals(deletablefile, deletableFilesIterator.next().getPath());
-    if (deletableFilesIterator.hasNext()) {
-      fail("File " + notDeletablefile
-          + " should not be deletable as its hfile reference node is added.");
-    }
-  }
-
-  /*
-   * Test for HBASE-14621. This test does not assert anything directly. Without the fix the test
-   * ends up in an infinite loop, so it times out.
-   */
-  @Test(timeout = 15000)
-  public void testForDifferntHFileRefsZnodeVersion() throws Exception {
-    // 1. Create a file
-    Path file = new Path(root, "testForDifferntHFileRefsZnodeVersion");
-    fs.createNewFile(file);
-    // 2. Assert file is successfully created
-    assertTrue("Test file not created!", fs.exists(file));
-    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
-    cleaner.setConf(conf);
-
-    ReplicationQueuesClient replicationQueuesClient = Mockito.mock(ReplicationQueuesClient.class);
-    // Return a different znode version for each call
-    Mockito.when(replicationQueuesClient.getHFileRefsNodeChangeVersion()).thenReturn(1, 2);
-
-    Class<? extends ReplicationHFileCleaner> cleanerClass = cleaner.getClass();
-    Field rqc = cleanerClass.getDeclaredField("rqc");
-    rqc.setAccessible(true);
-    rqc.set(cleaner, replicationQueuesClient);
-
-    cleaner.isFileDeletable(fs.getFileStatus(file));
-  }
-
-  /**
-   * ReplicationHFileCleaner should be able to ride over ZooKeeper errors without aborting.
-   */
-  @Test
-  public void testZooKeeperAbort() throws Exception {
-    ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
-
-    List<FileStatus> dummyFiles =
-        Lists.newArrayList(new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path(
-            "hfile1")), new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path(
-            "hfile2")));
-
-    FaultyZooKeeperWatcher faultyZK =
-        new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
-    try {
-      faultyZK.init();
-      cleaner.setConf(conf, faultyZK);
-      // should keep all files due to a ConnectionLossException getting the queues znodes
-      Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
-      assertFalse(toDelete.iterator().hasNext());
-      assertFalse(cleaner.isStopped());
-    } finally {
-      faultyZK.close();
-    }
-
-    // when zk is working both files should be returned
-    cleaner = new ReplicationHFileCleaner();
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null);
-    try {
-      cleaner.setConf(conf, zkw);
-      Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
-      Iterator<FileStatus> iter = filesToDelete.iterator();
-      assertTrue(iter.hasNext());
-      assertEquals(new Path("hfile1"), iter.next().getPath());
-      assertTrue(iter.hasNext());
-      assertEquals(new Path("hfile2"), iter.next().getPath());
-      assertFalse(iter.hasNext());
-    } finally {
-      zkw.close();
-    }
-  }
-
-  static class DummyServer implements Server {
-
-    @Override
-    public Configuration getConfiguration() {
-      return TEST_UTIL.getConfiguration();
-    }
-
-    @Override
-    public ZooKeeperWatcher getZooKeeper() {
-      try {
-        return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
-      } catch (IOException e) {
-        e.printStackTrace();
-      }
-      return null;
-    }
-
-    @Override
-    public CoordinatedStateManager getCoordinatedStateManager() {
-      return null;
-    }
-
-    @Override
-    public ClusterConnection getConnection() {
-      return null;
-    }
-
-    @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
-    public ServerName getServerName() {
-      return ServerName.valueOf("regionserver,60020,000000");
-    }
-
-    @Override
-    public void abort(String why, Throwable e) {
-    }
-
-    @Override
-    public boolean isAborted() {
-      return false;
-    }
-
-    @Override
-    public void stop(String why) {
-    }
-
-    @Override
-    public boolean isStopped() {
-      return false;
-    }
-
-    @Override
-    public ChoreService getChoreService() {
-      return null;
-    }
-
-    @Override
-    public ClusterConnection getClusterConnection() {
-      // TODO Auto-generated method stub
-      return null;
-    }
-  }
-
-  static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {
-    private RecoverableZooKeeper zk;
-    public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
-        throws ZooKeeperConnectionException, IOException {
-      super(conf, identifier, abortable);
-    }
-
-    public void init() throws Exception {
-      this.zk = spy(super.getRecoverableZooKeeper());
-      doThrow(new KeeperException.ConnectionLossException())
-          .when(zk).getData("/hbase/replication/hfile-refs", null, new Stat());
-    }
-
-    public RecoverableZooKeeper getRecoverableZooKeeper() {
-      return zk;
-    }
-  }
-}
\ No newline at end of file
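
Both deleted tests rely on Replication.decorateMasterConfiguration(conf) to
splice the replication-aware delegates into the master's cleaner chains. The
sketch below spells that wiring out by hand; the two plugin keys are standard
HBase configuration, while the class lists are illustrative and use the
pre-move package names that appear in this diff.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerWiringSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Delegates consulted, in order, for files in the WAL archive.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner,"
                + "org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner");
        // Delegates consulted for files in the HFile archive. The
        // ReplicationHFileCleaner entry corresponds to what
        // decorateMasterConfiguration adds once bulk-load replication is
        // enabled, as TestReplicationHFileCleaner does via
        // HConstants.REPLICATION_BULKLOAD_ENABLE_KEY.
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
                + "org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner");
      }
    }

The effect is the one both tests assert: a file in an archive directory is
deleted only when no delegate, TTL-based or replication-based, still claims it.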