Posted to commits@hbase.apache.org by bu...@apache.org on 2016/11/03 21:19:34 UTC
[3/5] hbase git commit: HBASE-16957 Moved cleaners from master
package to fs.legacy package and removed filesystem/ directory layout
references from a few files in master package
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
index 074c113..13aba37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
@@ -26,9 +26,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
-import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.BaseLogCleanerDelegate;
import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
@@ -39,9 +37,7 @@ import java.util.List;
import java.util.Set;
import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
-import com.google.common.collect.Sets;
import org.apache.zookeeper.KeeperException;
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index 741065a..a4364eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -32,7 +32,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 0eacc2b..1d5e75f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -1050,63 +1050,6 @@ public abstract class FSUtils {
return blocksDistribution;
}
- // TODO move this method OUT of FSUtils. No dependencies to HMaster
- /**
- * Returns the total overall fragmentation percentage. Includes hbase:meta and
- * -ROOT- as well.
- *
- * @param master The master defining the HBase root and file system.
- * @return A map for each table and its percentage.
- * @throws IOException When scanning the directory fails.
- */
- public static int getTotalTableFragmentation(final HMaster master)
- throws IOException {
- Map<String, Integer> map = getTableFragmentation(master);
- return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1;
- }
-
- /**
- * Runs through the HBase rootdir and checks how many stores for each table
- * have more than one file in them. Checks -ROOT- and hbase:meta too. The total
- * percentage across all tables is stored under the special key "-TOTAL-".
- *
- * @param master The master defining the HBase root and file system.
- * @return A map for each table and its percentage.
- *
- * @throws IOException When scanning the directory fails.
- */
- public static Map<String, Integer> getTableFragmentation(final HMaster master)
- throws IOException {
- final Map<String, Integer> frags = new HashMap<String, Integer>();
- int cfCountTotal = 0;
- int cfFragTotal = 0;
-
- MasterStorage<? extends StorageIdentifier> ms = master.getMasterStorage();
- for (TableName table: ms.getTables()) {
- int cfCount = 0;
- int cfFrag = 0;
- for (HRegionInfo hri: ms.getRegions(table)) {
- RegionStorage rfs = ms.getRegionStorage(hri);
- final Collection<String> families = rfs.getFamilies();
- for (String family: families) {
- cfCount++;
- cfCountTotal++;
- if (rfs.getStoreFiles(family).size() > 1) {
- cfFrag++;
- cfFragTotal++;
- }
- }
- }
- // compute percentage per table and store in result list
- frags.put(table.getNameAsString(),
- cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100));
- }
- // set overall percentage for all tables
- frags.put("-TOTAL-",
- cfCountTotal == 0? 0: Math.round((float) cfFragTotal / cfCountTotal * 100));
- return frags;
- }
-
/**
* Returns the {@link org.apache.hadoop.fs.Path} object representing the table directory under
* path rootdir
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 86b70c7..93df9dd 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -25,7 +25,6 @@
import="java.util.List"
import="java.util.LinkedHashMap"
import="java.util.Map"
- import="java.util.Set"
import="java.util.Collection"
import="java.util.Collections"
import="java.util.Comparator"
@@ -40,7 +39,6 @@
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
import="org.apache.hadoop.hbase.util.Bytes"
- import="org.apache.hadoop.hbase.util.FSUtils"
import="org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos"
import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos"
import="org.apache.hadoop.hbase.TableName"
@@ -69,7 +67,7 @@
HConstants.DEFAULT_META_REPLICA_NUM);
Map<String, Integer> frags = null;
if (showFragmentation) {
- frags = FSUtils.getTableFragmentation(master);
+ frags = master.getMasterStorage().getTableFragmentation();
}
String action = request.getParameter("action");
String key = request.getParameter("key");
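[Note: the fragmentation logic removed from FSUtils above already walked MasterStorage (ms.getTables(), ms.getRegions(table), ms.getRegionStorage(hri)), which lines up with the new call site in table.jsp, master.getMasterStorage().getTableFragmentation(). A minimal sketch of how that logic could read as an instance method on MasterStorage follows; the placement and exact signature are inferences from this diff, not something the commit shows.

    // Sketch only: the removed FSUtils logic rehomed as an instance method on MasterStorage,
    // assuming getTables()/getRegions()/getRegionStorage() keep the signatures used above.
    public Map<String, Integer> getTableFragmentation() throws IOException {
      final Map<String, Integer> frags = new HashMap<String, Integer>();
      int cfCountTotal = 0;  // all column families across all tables
      int cfFragTotal = 0;   // families whose store has more than one file
      for (TableName table : getTables()) {
        int cfCount = 0;
        int cfFrag = 0;
        for (HRegionInfo hri : getRegions(table)) {
          RegionStorage rs = getRegionStorage(hri);
          for (String family : rs.getFamilies()) {
            cfCount++;
            cfCountTotal++;
            if (rs.getStoreFiles(family).size() > 1) {
              cfFrag++;
              cfFragTotal++;
            }
          }
        }
        // percentage of fragmented families per table
        frags.put(table.getNameAsString(),
            cfCount == 0 ? 0 : Math.round((float) cfFrag / cfCount * 100));
      }
      // overall percentage across all tables under the special key "-TOTAL-"
      frags.put("-TOTAL-",
          cfCountTotal == 0 ? 0 : Math.round((float) cfFragTotal / cfCountTotal * 100));
      return frags;
    }
]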
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 467e903..45d8aae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -1153,6 +1154,19 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
MiniHBaseCluster.class.getName());
}
+ public HFileCleaner getHFileCleanerChore() {
+ HFileCleaner hfileCleaner = null;
+ HMaster master = getMiniHBaseCluster().getMaster();
+ Iterable<ScheduledChore> chores = master.getMasterStorage().getChores(master, null);
+ for (ScheduledChore chore : chores) {
+ if (chore instanceof HFileCleaner) {
+ hfileCleaner = (HFileCleaner) chore;
+ break;
+ }
+ }
+ return hfileCleaner;
+ }
+
/**
* Stops mini hbase, zk, and hdfs clusters.
* @throws IOException
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index 305d2e8..8321f29 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -33,13 +33,13 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.StoppableImplementation;
import org.junit.After;
@@ -59,8 +58,8 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
- * Test that the {@link HFileArchiver} correctly removes all the parts of a region when cleaning up
- * a region
+ * Test that the {@link HFileArchiver} correctly removes all the
+ * parts of a region when cleaning up a region
*/
@Category({MediumTests.class, MiscTests.class})
public class TestHFileArchiving {
@@ -78,7 +77,10 @@ public class TestHFileArchiving {
UTIL.startMiniCluster();
// We don't want the cleaner to remove files. The tests do that.
- UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().cancel(true);
+ HFileCleaner chore = UTIL.getHFileCleanerChore();
+ if (chore != null) {
+ chore.cancel(true);
+ }
}
private static void setupConf(Configuration conf) {
@@ -386,8 +388,7 @@ public class TestHFileArchiving {
try {
// Try to archive the file
- HFileArchiver.archiveRegion(fs, rootDir,
- sourceRegionDir.getParent(), sourceRegionDir);
+ HFileArchiver.archiveRegion(fs, rootDir, sourceRegionDir.getParent(), sourceRegionDir);
// The archiver succeeded, the file is no longer in the original location
// but it's in the archive location.
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 64139ee..0f920d7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
-import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.BaseHFileCleanerDelegate;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java
index 65a67d0..500b26f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
@@ -249,7 +250,10 @@ public class TestCloneSnapshotFromClient {
// ==========================================================================
private void waitCleanerRun() throws InterruptedException {
- TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
+ HFileCleaner chore = TEST_UTIL.getHFileCleanerChore();
+ if (chore != null) {
+ chore.choreForTesting();
+ }
}
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index 9b4206c..0658904 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.fs.MasterStorage;
import org.apache.hadoop.hbase.fs.RegionStorage.StoreFileVisitor;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -298,7 +299,10 @@ public class TestRestoreSnapshotFromClient {
// Helpers
// ==========================================================================
private void waitCleanerRun() throws InterruptedException {
- TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
+ HFileCleaner chore = TEST_UTIL.getHFileCleanerChore();
+ if (chore != null) {
+ chore.choreForTesting();
+ }
}
private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestCleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestCleanerChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestCleanerChore.java
new file mode 100644
index 0000000..efa69e2
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestCleanerChore.java
@@ -0,0 +1,319 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.legacy.cleaner;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.StoppableImplementation;
+import org.junit.After;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestCleanerChore {
+
+ private static final Log LOG = LogFactory.getLog(TestCleanerChore.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ @After
+ public void cleanup() throws Exception {
+ // delete and recreate the test directory, ensuring a clean test dir between tests
+ UTIL.cleanupTestDir();
+ }
+
+
+ @Test
+ public void testSavesFilesOnRequest() throws Exception {
+ Stoppable stop = new StoppableImplementation();
+ Configuration conf = UTIL.getConfiguration();
+ Path testDir = UTIL.getDataTestDir();
+ FileSystem fs = UTIL.getTestFileSystem();
+ String confKey = "hbase.test.cleaner.delegates";
+ conf.set(confKey, NeverDelete.class.getName());
+
+ AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+
+ // create the directory layout in the directory to clean
+ Path parent = new Path(testDir, "parent");
+ Path file = new Path(parent, "someFile");
+ fs.mkdirs(parent);
+ // touch a new file
+ fs.create(file).close();
+ assertTrue("Test file didn't get created.", fs.exists(file));
+
+ // run the chore
+ chore.chore();
+
+ // verify the file and its directory were preserved, since NeverDelete rejects deletion
+ assertTrue("File got deleted unexpectedly", fs.exists(file));
+ assertTrue("Directory got deleted unexpectedly", fs.exists(parent));
+ }
+
+ @Test
+ public void testDeletesEmptyDirectories() throws Exception {
+ Stoppable stop = new StoppableImplementation();
+ Configuration conf = UTIL.getConfiguration();
+ Path testDir = UTIL.getDataTestDir();
+ FileSystem fs = UTIL.getTestFileSystem();
+ String confKey = "hbase.test.cleaner.delegates";
+ conf.set(confKey, AlwaysDelete.class.getName());
+
+ AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+
+ // create the directory layout in the directory to clean
+ Path parent = new Path(testDir, "parent");
+ Path child = new Path(parent, "child");
+ Path emptyChild = new Path(parent, "emptyChild");
+ Path file = new Path(child, "someFile");
+ fs.mkdirs(child);
+ fs.mkdirs(emptyChild);
+ // touch a new file
+ fs.create(file).close();
+ // also create a file in the top level directory
+ Path topFile = new Path(testDir, "topFile");
+ fs.create(topFile).close();
+ assertTrue("Test file didn't get created.", fs.exists(file));
+ assertTrue("Test file didn't get created.", fs.exists(topFile));
+
+ // run the chore
+ chore.chore();
+
+ // verify all the files got deleted
+ assertFalse("File didn't get deleted", fs.exists(topFile));
+ assertFalse("File didn't get deleted", fs.exists(file));
+ assertFalse("Empty directory didn't get deleted", fs.exists(child));
+ assertFalse("Empty directory didn't get deleted", fs.exists(parent));
+ }
+
+ /**
+ * Test to make sure that we don't attempt to ask the delegate whether or not we should preserve a
+ * directory.
+ * @throws Exception on failure
+ */
+ @Test
+ public void testDoesNotCheckDirectories() throws Exception {
+ Stoppable stop = new StoppableImplementation();
+ Configuration conf = UTIL.getConfiguration();
+ Path testDir = UTIL.getDataTestDir();
+ FileSystem fs = UTIL.getTestFileSystem();
+ String confKey = "hbase.test.cleaner.delegates";
+ conf.set(confKey, AlwaysDelete.class.getName());
+
+ AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+ // spy on the delegate to ensure that we don't check for directories
+ AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
+ AlwaysDelete spy = Mockito.spy(delegate);
+ chore.cleanersChain.set(0, spy);
+
+ // create the directory layout in the directory to clean
+ Path parent = new Path(testDir, "parent");
+ Path file = new Path(parent, "someFile");
+ fs.mkdirs(parent);
+ assertTrue("Test parent didn't get created.", fs.exists(parent));
+ // touch a new file
+ fs.create(file).close();
+ assertTrue("Test file didn't get created.", fs.exists(file));
+
+ FileStatus fStat = fs.getFileStatus(parent);
+ chore.chore();
+ // make sure we never checked the directory
+ Mockito.verify(spy, Mockito.never()).isFileDeletable(fStat);
+ Mockito.reset(spy);
+ }
+
+ @Test
+ public void testStoppedCleanerDoesNotDeleteFiles() throws Exception {
+ Stoppable stop = new StoppableImplementation();
+ Configuration conf = UTIL.getConfiguration();
+ Path testDir = UTIL.getDataTestDir();
+ FileSystem fs = UTIL.getTestFileSystem();
+ String confKey = "hbase.test.cleaner.delegates";
+ conf.set(confKey, AlwaysDelete.class.getName());
+
+ AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+
+ // also create a file in the top level directory
+ Path topFile = new Path(testDir, "topFile");
+ fs.create(topFile).close();
+ assertTrue("Test file didn't get created.", fs.exists(topFile));
+
+ // stop the chore
+ stop.stop("testing stop");
+
+ // run the chore
+ chore.chore();
+
+ // test that the file still exists
+ assertTrue("File got deleted while chore was stopped", fs.exists(topFile));
+ }
+
+ /**
+ * While cleaning a directory, all the files in the directory may be deleted, but there may be
+ * another file added, in which case the directory shouldn't be deleted.
+ * @throws IOException on failure
+ */
+ @Test
+ public void testCleanerDoesNotDeleteDirectoryWithLateAddedFiles() throws IOException {
+ Stoppable stop = new StoppableImplementation();
+ Configuration conf = UTIL.getConfiguration();
+ final Path testDir = UTIL.getDataTestDir();
+ final FileSystem fs = UTIL.getTestFileSystem();
+ String confKey = "hbase.test.cleaner.delegates";
+ conf.set(confKey, AlwaysDelete.class.getName());
+
+ AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+ // spy on the delegate to ensure that we don't check for directories
+ AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
+ AlwaysDelete spy = Mockito.spy(delegate);
+ chore.cleanersChain.set(0, spy);
+
+ // create the directory layout in the directory to clean
+ final Path parent = new Path(testDir, "parent");
+ Path file = new Path(parent, "someFile");
+ fs.mkdirs(parent);
+ // touch a new file
+ fs.create(file).close();
+ assertTrue("Test file didn't get created.", fs.exists(file));
+ final Path addedFile = new Path(parent, "addedFile");
+
+ // when we attempt to delete the original file, add another file in the same directory
+ Mockito.doAnswer(new Answer<Boolean>() {
+ @Override
+ public Boolean answer(InvocationOnMock invocation) throws Throwable {
+ fs.create(addedFile).close();
+ FSUtils.logFileSystemState(fs, testDir, LOG);
+ return (Boolean) invocation.callRealMethod();
+ }
+ }).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
+
+ // run the chore
+ chore.chore();
+
+ // make sure all the directories + added file exist, but the original file is deleted
+ assertTrue("Added file unexpectedly deleted", fs.exists(addedFile));
+ assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
+ assertFalse("Original file unexpectedly retained", fs.exists(file));
+ Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
+ Mockito.reset(spy);
+ }
+
+ /**
+ * The cleaner runs in a loop: it first checks whether all the files under a directory can be
+ * deleted and, if they all can, tries to delete the directory itself. However, a file may be
+ * added to that directory after the original check. This test ensures that we don't accidentally
+ * delete the directory in that case and don't get spurious IOExceptions.
+ * <p>
+ * This was from HBASE-7465.
+ * @throws Exception on failure
+ */
+ @Test
+ public void testNoExceptionFromDirectoryWithRacyChildren() throws Exception {
+ Stoppable stop = new StoppableImplementation();
+ // need to use a localutil to not break the rest of the test that runs on the local FS, which
+ // gets hosed when we start to use a minicluster.
+ HBaseTestingUtility localUtil = new HBaseTestingUtility();
+ Configuration conf = localUtil.getConfiguration();
+ final Path testDir = UTIL.getDataTestDir();
+ final FileSystem fs = UTIL.getTestFileSystem();
+ LOG.debug("Writing test data to: " + testDir);
+ String confKey = "hbase.test.cleaner.delegates";
+ conf.set(confKey, AlwaysDelete.class.getName());
+
+ AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
+ // spy on the delegate to ensure that we don't check for directories
+ AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
+ AlwaysDelete spy = Mockito.spy(delegate);
+ chore.cleanersChain.set(0, spy);
+
+ // create the directory layout in the directory to clean
+ final Path parent = new Path(testDir, "parent");
+ Path file = new Path(parent, "someFile");
+ fs.mkdirs(parent);
+ // touch a new file
+ fs.create(file).close();
+ assertTrue("Test file didn't get created.", fs.exists(file));
+ final Path racyFile = new Path(parent, "addedFile");
+
+ // when we attempt to delete the original file, add another file in the same directory
+ Mockito.doAnswer(new Answer<Boolean>() {
+ @Override
+ public Boolean answer(InvocationOnMock invocation) throws Throwable {
+ fs.create(racyFile).close();
+ FSUtils.logFileSystemState(fs, testDir, LOG);
+ return (Boolean) invocation.callRealMethod();
+ }
+ }).when(spy).isFileDeletable(Mockito.any(FileStatus.class));
+
+ // attempt to delete the directory; this should fail because the racy file is added mid-iteration
+ if (chore.checkAndDeleteDirectory(parent)) {
+ throw new Exception(
+ "Reported success deleting directory, should have failed when adding file mid-iteration");
+ }
+
+ // make sure all the directories + added file exist, but the original file is deleted
+ assertTrue("Added file unexpectedly deleted", fs.exists(racyFile));
+ assertTrue("Parent directory deleted unexpectedly", fs.exists(parent));
+ assertFalse("Original file unexpectedly retained", fs.exists(file));
+ Mockito.verify(spy, Mockito.times(1)).isFileDeletable(Mockito.any(FileStatus.class));
+ }
+
+ private static class AllValidPaths extends CleanerChore<BaseHFileCleanerDelegate> {
+
+ public AllValidPaths(String name, Stoppable s, Configuration conf, FileSystem fs,
+ Path oldFileDir, String confkey) {
+ super(name, Integer.MAX_VALUE, s, conf, fs, oldFileDir, confkey);
+ }
+
+ // all paths are valid
+ @Override
+ protected boolean validate(Path file) {
+ return true;
+ }
+ }
+
+ public static class AlwaysDelete extends BaseHFileCleanerDelegate {
+ @Override
+ public boolean isFileDeletable(FileStatus fStat) {
+ return true;
+ }
+ }
+
+ public static class NeverDelete extends BaseHFileCleanerDelegate {
+ @Override
+ public boolean isFileDeletable(FileStatus fStat) {
+ return false;
+ }
+ }
+}
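[Note: the AlwaysDelete and NeverDelete delegates above illustrate the extension point these chores are built around: the chore deletes a file only if every configured delegate agrees it is deletable. As a hedged sketch of a deployment-specific delegate, the ".keep" suffix and class name below are invented for illustration; only the plugin key (HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, used by the tests that follow) comes from this commit.

    // Illustrative only: preserve any archived hfile whose name ends in ".keep".
    public class KeepMarkedFilesCleaner extends BaseHFileCleanerDelegate {
      @Override
      public boolean isFileDeletable(FileStatus fStat) {
        // returning false preserves the file regardless of what other delegates say
        return !fStat.getPath().getName().endsWith(".keep");
      }
    }

    // registration, e.g. in a test configuration:
    // conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, KeepMarkedFilesCleaner.class.getName());
]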
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestHFileCleaner.java
new file mode 100644
index 0000000..01bc4bf
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestHFileCleaner.java
@@ -0,0 +1,263 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.legacy.cleaner;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestHFileCleaner {
+ private static final Log LOG = LogFactory.getLog(TestHFileCleaner.class);
+
+ private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ // have to use a minidfs cluster because the localfs doesn't modify file times correctly
+ UTIL.startMiniDFSCluster(1);
+ }
+
+ @AfterClass
+ public static void shutdownCluster() throws IOException {
+ UTIL.shutdownMiniDFSCluster();
+ }
+
+ @Test
+ public void testTTLCleaner() throws IOException, InterruptedException {
+ FileSystem fs = UTIL.getDFSCluster().getFileSystem();
+ Path root = UTIL.getDataTestDirOnTestFS();
+ Path file = new Path(root, "file");
+ fs.createNewFile(file);
+ long createTime = System.currentTimeMillis();
+ assertTrue("Test file not created!", fs.exists(file));
+ TimeToLiveHFileCleaner cleaner = new TimeToLiveHFileCleaner();
+ // update the time info for the file, so the cleaner removes it
+ fs.setTimes(file, createTime - 100, -1);
+ Configuration conf = UTIL.getConfiguration();
+ conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 100);
+ cleaner.setConf(conf);
+ assertTrue("File not set deletable - check mod time:" + getFileStats(file, fs)
+ + " with create time:" + createTime, cleaner.isFileDeletable(fs.getFileStatus(file)));
+ }
+
+ /**
+ * @param file to check
+ * @return loggable information about the file
+ */
+ private String getFileStats(Path file, FileSystem fs) throws IOException {
+ FileStatus status = fs.getFileStatus(file);
+ return "File" + file + ", mtime:" + status.getModificationTime() + ", atime:"
+ + status.getAccessTime();
+ }
+
+ @Test(timeout = 60 * 1000)
+ public void testHFileCleaning() throws Exception {
+ final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
+ String prefix = "someHFileThatWouldBeAUUID";
+ Configuration conf = UTIL.getConfiguration();
+ // set TTL
+ long ttl = 2000;
+ conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
+ "org.apache.hadoop.hbase.fs.legacy.cleaner.TimeToLiveHFileCleaner");
+ conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
+ Server server = new DummyServer();
+ Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+ FileSystem fs = FileSystem.get(conf);
+ HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
+
+ // Create 1 invalid file, 1 "recent" file within the TTL, and 31 old files past the TTL
+ final long createTime = System.currentTimeMillis();
+ fs.delete(archivedHfileDir, true);
+ fs.mkdirs(archivedHfileDir);
+ // Case 1: 1 invalid file, which should be deleted directly
+ fs.createNewFile(new Path(archivedHfileDir, "dfd-dfd"));
+ // Case 2: 1 "recent" file (created below), not even deletable for the first hfile cleaner
+ // (TimeToLiveHFileCleaner), so we are not going down the chain
+ LOG.debug("Now is: " + createTime);
+ for (int i = 1; i < 32; i++) {
+ // Case 3: old files which would be deletable for the first hfile cleaner
+ // (TimeToLiveHFileCleaner)
+ Path fileName = new Path(archivedHfileDir, (prefix + "." + (createTime + i)));
+ fs.createNewFile(fileName);
+ // set the creation time past ttl to ensure that it gets removed
+ fs.setTimes(fileName, createTime - ttl - 1, -1);
+ LOG.debug("Creating " + getFileStats(fileName, fs));
+ }
+
+ // Case 2: 1 newer file, not even deletable for the first hfile cleaner
+ // (TimeToLiveHFileCleaner), so we are not going down the chain
+ Path saved = new Path(archivedHfileDir, prefix + ".00000000000");
+ fs.createNewFile(saved);
+ // set creation time within the ttl
+ fs.setTimes(saved, createTime - ttl / 2, -1);
+ LOG.debug("Creating " + getFileStats(saved, fs));
+ for (FileStatus stat : fs.listStatus(archivedHfileDir)) {
+ LOG.debug(stat.getPath().toString());
+ }
+
+ assertEquals(33, fs.listStatus(archivedHfileDir).length);
+
+ // set a custom edge manager to handle time checking
+ EnvironmentEdge setTime = new EnvironmentEdge() {
+ @Override
+ public long currentTime() {
+ return createTime;
+ }
+ };
+ EnvironmentEdgeManager.injectEdge(setTime);
+
+ // run the chore
+ cleaner.chore();
+
+ // ensure we only end up with the saved file
+ assertEquals(1, fs.listStatus(archivedHfileDir).length);
+
+ for (FileStatus file : fs.listStatus(archivedHfileDir)) {
+ LOG.debug("Kept hfiles: " + file.getPath().getName());
+ }
+
+ // reset the edge back to the original edge
+ EnvironmentEdgeManager.injectEdge(originalEdge);
+ }
+
+ @Test
+ public void testRemovesEmptyDirectories() throws Exception {
+ Configuration conf = UTIL.getConfiguration();
+ // no cleaner policies = delete all files
+ conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
+ Server server = new DummyServer();
+ Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+
+ // setup the cleaner
+ FileSystem fs = UTIL.getDFSCluster().getFileSystem();
+ HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
+
+ // make all the directories for archiving files
+ Path table = new Path(archivedHfileDir, "table");
+ Path region = new Path(table, "regionsomthing");
+ Path family = new Path(region, "fam");
+ Path file = new Path(family, "file12345");
+ fs.mkdirs(family);
+ if (!fs.exists(family)) throw new RuntimeException("Couldn't create test family:" + family);
+ fs.create(file).close();
+ if (!fs.exists(file)) throw new RuntimeException("Test file didn't get created:" + file);
+
+ // run the chore to cleanup the files (and the directories above it)
+ cleaner.chore();
+
+ // make sure all the parent directories get removed
+ assertFalse("family directory not removed for empty directory", fs.exists(family));
+ assertFalse("region directory not removed for empty directory", fs.exists(region));
+ assertFalse("table directory not removed for empty directory", fs.exists(table));
+ assertTrue("archive directory should still exist", fs.exists(archivedHfileDir));
+ }
+
+ static class DummyServer implements Server {
+
+ @Override
+ public Configuration getConfiguration() {
+ return UTIL.getConfiguration();
+ }
+
+ @Override
+ public ZooKeeperWatcher getZooKeeper() {
+ try {
+ return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ @Override
+ public CoordinatedStateManager getCoordinatedStateManager() {
+ return null;
+ }
+
+ @Override
+ public ClusterConnection getConnection() {
+ return null;
+ }
+
+ @Override
+ public MetaTableLocator getMetaTableLocator() {
+ return null;
+ }
+
+ @Override
+ public ServerName getServerName() {
+ return ServerName.valueOf("regionserver,60020,000000");
+ }
+
+ @Override
+ public void abort(String why, Throwable e) {
+ }
+
+ @Override
+ public boolean isAborted() {
+ return false;
+ }
+
+ @Override
+ public void stop(String why) {
+ }
+
+ @Override
+ public boolean isStopped() {
+ return false;
+ }
+
+ @Override
+ public ChoreService getChoreService() {
+ return null;
+ }
+
+ @Override
+ public ClusterConnection getClusterConnection() {
+ // TODO Auto-generated method stub
+ return null;
+ }
+ }
+}
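[Note: testTTLCleaner above drives TimeToLiveHFileCleaner by rewinding the file's modification time with fs.setTimes() and shrinking TTL_CONF_KEY. The deletability decision it exercises reduces to comparing file age against the configured TTL; a simplified sketch of that predicate follows, as an illustration under that assumption, not the class's actual code.

    // Simplified TTL predicate: a file becomes deletable once it has outlived ttlMillis.
    boolean isPastTtl(FileStatus fStat, long ttlMillis) {
      long life = EnvironmentEdgeManager.currentTime() - fStat.getModificationTime();
      // a negative age means the mod time is in the future (clock skew): keep the file
      return life >= 0 && life > ttlMillis;
    }

This is also why testHFileCleaning injects a fixed EnvironmentEdge before running the chore: pinning "now" makes every file's age deterministic.]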
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestHFileLinkCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestHFileLinkCleaner.java
new file mode 100644
index 0000000..998481a
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestHFileLinkCleaner.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.legacy.cleaner;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
+
+/**
+ * Test the HFileLink Cleaner.
+ * HFiles with links cannot be deleted while a link is still present.
+ */
+@Category({MasterTests.class, MediumTests.class})
+public class TestHFileLinkCleaner {
+ @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+ withLookingForStuckThread(true).build();
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ @Test
+ public void testHFileLinkCleaning() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
+ conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
+ Path rootDir = FSUtils.getRootDir(conf);
+ FileSystem fs = FileSystem.get(conf);
+
+ final TableName tableName = TableName.valueOf("test-table");
+ final TableName tableLinkName = TableName.valueOf("test-link");
+ final String hfileName = "1234567890";
+ final String familyName = "cf";
+
+ HRegionInfo hri = new HRegionInfo(tableName);
+ HRegionInfo hriLink = new HRegionInfo(tableLinkName);
+
+ Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
+ Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
+ tableName, hri.getEncodedName(), familyName);
+ Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
+ tableLinkName, hriLink.getEncodedName(), familyName);
+
+ // Create an hfile in the archive: <archiveDir>/<tableName>/<encodedRegionName>/<familyName>/<hfileName>
+ Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
+ fs.mkdirs(familyPath);
+ Path hfilePath = new Path(familyPath, hfileName);
+ fs.createNewFile(hfilePath);
+
+ // Create link to hfile
+ Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
+ hriLink.getEncodedName(), familyName);
+ fs.mkdirs(familyLinkPath);
+ HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
+ Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
+ assertTrue(fs.exists(linkBackRefDir));
+ FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
+ assertEquals(1, backRefs.length);
+ Path linkBackRef = backRefs[0].getPath();
+
+ // Initialize cleaner
+ final long ttl = 1000;
+ conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
+ Server server = new DummyServer();
+ HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
+
+ // Link backref cannot be removed
+ cleaner.chore();
+ assertTrue(fs.exists(linkBackRef));
+ assertTrue(fs.exists(hfilePath));
+
+ // Link backref can be removed
+ fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
+ FSUtils.getTableDir(archiveDir, tableLinkName));
+ cleaner.chore();
+ assertFalse("Link should be deleted", fs.exists(linkBackRef));
+
+ // HFile can be removed
+ Thread.sleep(ttl * 2);
+ cleaner.chore();
+ assertFalse("HFile should be deleted", fs.exists(hfilePath));
+
+ // Remove everything
+ for (int i = 0; i < 4; ++i) {
+ Thread.sleep(ttl * 2);
+ cleaner.chore();
+ }
+ assertFalse("Table archive dir should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
+ assertFalse("Link table archive dir should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
+ }
+
+ private static Path getFamilyDirPath(final Path rootDir, final TableName table,
+ final String region, final String family) {
+ return new Path(new Path(FSUtils.getTableDir(rootDir, table), region), family);
+ }
+
+ static class DummyServer implements Server {
+
+ @Override
+ public Configuration getConfiguration() {
+ return TEST_UTIL.getConfiguration();
+ }
+
+ @Override
+ public ZooKeeperWatcher getZooKeeper() {
+ try {
+ return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ @Override
+ public CoordinatedStateManager getCoordinatedStateManager() {
+ return null;
+ }
+
+ @Override
+ public ClusterConnection getConnection() {
+ return null;
+ }
+
+ @Override
+ public MetaTableLocator getMetaTableLocator() {
+ return null;
+ }
+
+ @Override
+ public ServerName getServerName() {
+ return ServerName.valueOf("regionserver,60020,000000");
+ }
+
+ @Override
+ public void abort(String why, Throwable e) {}
+
+ @Override
+ public boolean isAborted() {
+ return false;
+ }
+
+ @Override
+ public void stop(String why) {}
+
+ @Override
+ public boolean isStopped() {
+ return false;
+ }
+
+ @Override
+ public ChoreService getChoreService() {
+ return null;
+ }
+
+ @Override
+ public ClusterConnection getClusterConnection() {
+ // TODO Auto-generated method stub
+ return null;
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestLogsCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestLogsCleaner.java
new file mode 100644
index 0000000..d313748
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestLogsCleaner.java
@@ -0,0 +1,308 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.legacy.cleaner;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
+
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.net.URLEncoder;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.replication.ReplicationFactory;
+import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClientZKImpl;
+import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
+import org.apache.hadoop.hbase.replication.regionserver.Replication;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.data.Stat;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+@Category({MasterTests.class, MediumTests.class})
+public class TestLogsCleaner {
+
+ private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @BeforeClass
+ public static void setUpBeforeClass() throws Exception {
+ TEST_UTIL.startMiniZKCluster();
+ }
+
+ /**
+ * @throws java.lang.Exception
+ */
+ @AfterClass
+ public static void tearDownAfterClass() throws Exception {
+ TEST_UTIL.shutdownMiniZKCluster();
+ }
+
+ @Test
+ public void testLogCleaning() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ // set TTL
+ long ttl = 10000;
+ conf.setLong("hbase.master.logcleaner.ttl", ttl);
+ Replication.decorateMasterConfiguration(conf);
+ Server server = new DummyServer();
+ ReplicationQueues repQueues =
+ ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper()));
+ repQueues.init(server.getServerName().toString());
+ final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
+ HConstants.HREGION_OLDLOGDIR_NAME);
+ String fakeMachineName =
+ URLEncoder.encode(server.getServerName().toString(), "UTF8");
+
+ final FileSystem fs = FileSystem.get(conf);
+
+ // Create 2 invalid files, 1 "recent" file, 1 very new file and 30 old files
+ long now = System.currentTimeMillis();
+ fs.delete(oldLogDir, true);
+ fs.mkdirs(oldLogDir);
+ // Case 1: 2 invalid files, which would be deleted directly
+ fs.createNewFile(new Path(oldLogDir, "a"));
+ fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + "a"));
+ // Case 2: 1 "recent" file, not even deletable for the first log cleaner
+ // (TimeToLiveLogCleaner), so we are not going down the chain
+ System.out.println("Now is: " + now);
+ for (int i = 1; i < 31; i++) {
+ // Case 3: old files which would be deletable for the first log cleaner
+ // (TimeToLiveLogCleaner), and also for the second (ReplicationLogCleaner)
+ Path fileName = new Path(oldLogDir, fakeMachineName + "." + (now - i) );
+ fs.createNewFile(fileName);
+ // Case 4: put 3 old log files in ZK indicating that they are scheduled
+ // for replication so these files would pass the first log cleaner
+ // (TimeToLiveLogCleaner) but would be rejected by the second
+ // (ReplicationLogCleaner)
+ if (i % (30/3) == 1) {
+ repQueues.addLog(fakeMachineName, fileName.getName());
+ System.out.println("Replication log file: " + fileName);
+ }
+ }
+
+ // sleep for some time to get a newer modification time
+ Thread.sleep(ttl);
+ fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + now));
+
+ // Case 2: 1 newer file, not even deletable for the first log cleaner
+ // (TimeToLiveLogCleaner), so we are not going down the chain
+ fs.createNewFile(new Path(oldLogDir, fakeMachineName + "." + (now + 10000) ));
+
+ for (FileStatus stat : fs.listStatus(oldLogDir)) {
+ System.out.println(stat.getPath().toString());
+ }
+
+ assertEquals(34, fs.listStatus(oldLogDir).length);
+
+ LogCleaner cleaner = new LogCleaner(1000, server, conf, fs, oldLogDir);
+
+ cleaner.chore();
+
+ // We end up with the current log file, a newer one and the 3 old log
+ // files which are scheduled for replication
+ TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return 5 == fs.listStatus(oldLogDir).length;
+ }
+ });
+
+ for (FileStatus file : fs.listStatus(oldLogDir)) {
+ System.out.println("Kept log files: " + file.getPath().getName());
+ }
+ }
+
+ @Test(timeout=5000)
+ public void testZnodeCversionChange() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
+ cleaner.setConf(conf);
+
+ ReplicationQueuesClientZKImpl rqcMock = Mockito.mock(ReplicationQueuesClientZKImpl.class);
+ Mockito.when(rqcMock.getQueuesZNodeCversion()).thenReturn(1, 2, 3, 4);
+
+ Field rqc = ReplicationLogCleaner.class.getDeclaredField("replicationQueues");
+ rqc.setAccessible(true);
+
+ rqc.set(cleaner, rqcMock);
+
+ // This should return eventually when cversion stabilizes
+ cleaner.getDeletableFiles(new LinkedList<FileStatus>());
+ }
+
+ /**
+ * ReplicationLogCleaner should be able to ride over ZooKeeper errors without
+ * aborting.
+ */
+ @Test
+ public void testZooKeeperAbort() throws Exception {
+ Configuration conf = TEST_UTIL.getConfiguration();
+ ReplicationLogCleaner cleaner = new ReplicationLogCleaner();
+
+ List<FileStatus> dummyFiles = Lists.newArrayList(
+ new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log1")),
+ new FileStatus(100, false, 3, 100, System.currentTimeMillis(), new Path("log2"))
+ );
+
+ FaultyZooKeeperWatcher faultyZK =
+ new FaultyZooKeeperWatcher(conf, "testZooKeeperAbort-faulty", null);
+ try {
+ faultyZK.init();
+ cleaner.setConf(conf, faultyZK);
+ // should keep all files due to a ConnectionLossException getting the queues znodes
+ Iterable<FileStatus> toDelete = cleaner.getDeletableFiles(dummyFiles);
+ assertFalse(toDelete.iterator().hasNext());
+ assertFalse(cleaner.isStopped());
+ } finally {
+ faultyZK.close();
+ }
+
+ // when zk is working both files should be returned
+ cleaner = new ReplicationLogCleaner();
+ ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testZooKeeperAbort-normal", null);
+ try {
+ cleaner.setConf(conf, zkw);
+ Iterable<FileStatus> filesToDelete = cleaner.getDeletableFiles(dummyFiles);
+ Iterator<FileStatus> iter = filesToDelete.iterator();
+ assertTrue(iter.hasNext());
+ assertEquals(new Path("log1"), iter.next().getPath());
+ assertTrue(iter.hasNext());
+ assertEquals(new Path("log2"), iter.next().getPath());
+ assertFalse(iter.hasNext());
+ } finally {
+ zkw.close();
+ }
+ }
+
+ static class DummyServer implements Server {
+
+ @Override
+ public Configuration getConfiguration() {
+ return TEST_UTIL.getConfiguration();
+ }
+
+ @Override
+ public ZooKeeperWatcher getZooKeeper() {
+ try {
+ return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ return null;
+ }
+
+ @Override
+ public CoordinatedStateManager getCoordinatedStateManager() {
+ return null;
+ }
+
+ @Override
+ public ClusterConnection getConnection() {
+ return null;
+ }
+
+ @Override
+ public MetaTableLocator getMetaTableLocator() {
+ return null;
+ }
+
+ @Override
+ public ServerName getServerName() {
+ return ServerName.valueOf("regionserver,60020,000000");
+ }
+
+ @Override
+ public void abort(String why, Throwable e) {}
+
+ @Override
+ public boolean isAborted() {
+ return false;
+ }
+
+ @Override
+ public void stop(String why) {}
+
+ @Override
+ public boolean isStopped() {
+ return false;
+ }
+
+ @Override
+ public ChoreService getChoreService() {
+ return null;
+ }
+
+ @Override
+ public ClusterConnection getClusterConnection() {
+ // not used by these tests
+ return null;
+ }
+ }
+
+ static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {
+ private RecoverableZooKeeper zk;
+
+ public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
+ throws ZooKeeperConnectionException, IOException {
+ super(conf, identifier, abortable);
+ }
+
+ public void init() throws Exception {
+ this.zk = spy(super.getRecoverableZooKeeper());
+ doThrow(new KeeperException.ConnectionLossException())
+ .when(zk).getData("/hbase/replication/rs", null, new Stat());
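+ // Mockito matches these arguments by equals(); the jute-generated Stat
+ // implements field-by-field equals(), so this fresh Stat matches the fresh
+ // Stat the cleaner passes when reading the replication rs znode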
+ }
+
+ public RecoverableZooKeeper getRecoverableZooKeeper() {
+ return zk;
+ }
+ }
+}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestSnapshotFromMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestSnapshotFromMaster.java
new file mode 100644
index 0000000..8b8592b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/cleaner/TestSnapshotFromMaster.java
@@ -0,0 +1,394 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.legacy.cleaner;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.fs.legacy.snapshot.SnapshotHFileCleaner;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test the master-related aspects of a snapshot
+ */
+@Category({MasterTests.class, MediumTests.class})
+public class TestSnapshotFromMaster {
+
+ private static final Log LOG = LogFactory.getLog(TestSnapshotFromMaster.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final int NUM_RS = 2;
+ private static Path rootDir;
+ private static FileSystem fs;
+ private static HMaster master;
+
+ // for hfile archiving test.
+ private static Path archiveDir;
+ private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+ private static final TableName TABLE_NAME =
+ TableName.valueOf("test");
+ // refresh the cache every 1/2 second
+ private static final long cacheRefreshPeriod = 500;
+ private static final int blockingStoreFiles = 12;
+
+ /**
+ * Setup the config for the cluster
+ */
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(NUM_RS);
+ fs = UTIL.getDFSCluster().getFileSystem();
+ master = UTIL.getMiniHBaseCluster().getMaster();
+ // the refactor removed the old master.getRootDir() accessor; derive the root
+ // dir from the configuration instead (assuming FSUtils.getRootDir, which reads
+ // hbase.rootdir) so that rootDir is non-null when archiveDir is built below
+ rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
+ archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
+ }
+
+ private static void setupConf(Configuration conf) {
+ // disable the ui
+ conf.setInt("hbase.regionserver.info.port", -1);
+ // change the flush size to a small amount, regulating number of store files
+ conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+ // so a load triggers compactions but still leaves some files in the store
+ conf.setInt("hbase.hstore.compaction.min", 2);
+ conf.setInt("hbase.hstore.compactionThreshold", 5);
+ // block writes if we get to 12 store files
+ conf.setInt("hbase.hstore.blockingStoreFiles", blockingStoreFiles);
+ // Ensure no extra cleaners on by default (e.g. TimeToLiveHFileCleaner)
+ conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
+ conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, "");
+ // Enable snapshot
+ conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+ conf.setLong(SnapshotHFileCleaner.HFILE_CACHE_REFRESH_PERIOD_CONF_KEY, cacheRefreshPeriod);
+ conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+ ConstantSizeRegionSplitPolicy.class.getName());
+ conf.setInt("hbase.hfile.compactions.cleaner.interval", 20 * 1000);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ UTIL.createTable(TABLE_NAME, TEST_FAM);
+ master.getSnapshotManager().setSnapshotHandlerForTesting(TABLE_NAME, null);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ UTIL.deleteTable(TABLE_NAME);
+ SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+ SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ // NOOP;
+ }
+ }
+
+// /**
+// * Test that the contract from the master for checking on a snapshot are valid.
+// * <p>
+// * <ol>
+// * <li>If a snapshot fails with an error, we expect to get the source error.</li>
+// * <li>If there is no snapshot name supplied, we should get an error.</li>
+// * <li>If asking about a snapshot that hasn't occurred, you should get an error.</li>
+// * </ol>
+// */
+// @Test(timeout = 300000)
+// public void testIsDoneContract() throws Exception {
+//
+// IsSnapshotDoneRequest.Builder builder = IsSnapshotDoneRequest.newBuilder();
+//
+// String snapshotName = "asyncExpectedFailureTest";
+//
+// // check that we get an exception when looking up snapshot where one hasn't happened
+// SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
+// UnknownSnapshotException.class);
+//
+// // and that we get the same issue, even if we specify a name
+// SnapshotDescription desc = SnapshotDescription.newBuilder()
+// .setName(snapshotName).setTable(TABLE_NAME.getNameAsString()).build();
+// builder.setSnapshot(desc);
+// SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
+// UnknownSnapshotException.class);
+//
+// // set a mock handler to simulate a snapshot
+// DisabledTableSnapshotHandler mockHandler = Mockito.mock(DisabledTableSnapshotHandler.class);
+// Mockito.when(mockHandler.getException()).thenReturn(null);
+// Mockito.when(mockHandler.getSnapshot()).thenReturn(desc);
+// Mockito.when(mockHandler.isFinished()).thenReturn(Boolean.TRUE);
+// Mockito.when(mockHandler.getCompletionTimestamp())
+// .thenReturn(EnvironmentEdgeManager.currentTime());
+//
+// master.getSnapshotManager()
+// .setSnapshotHandlerForTesting(TABLE_NAME, mockHandler);
+//
+// // if we do a lookup without a snapshot name, we should fail - you should always know your name
+// builder = IsSnapshotDoneRequest.newBuilder();
+// SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
+// UnknownSnapshotException.class);
+//
+// // then do the lookup for the snapshot that it is done
+// builder.setSnapshot(desc);
+// IsSnapshotDoneResponse response =
+// master.getMasterRpcServices().isSnapshotDone(null, builder.build());
+// assertTrue("Snapshot didn't complete when it should have.", response.getDone());
+//
+// // now try the case where we are looking for a snapshot we didn't take
+// builder.setSnapshot(SnapshotDescription.newBuilder().setName("Not A Snapshot").build());
+// SnapshotTestingUtils.expectSnapshotDoneException(master, builder.build(),
+// UnknownSnapshotException.class);
+//
+// // then create a snapshot to the fs and make sure that we can find it when checking done
+// snapshotName = "completed";
+// Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+// desc = desc.toBuilder().setName(snapshotName).build();
+// SnapshotDescriptionUtils.writeSnapshotInfo(desc, snapshotDir, fs);
+//
+// builder.setSnapshot(desc);
+// response = master.getMasterRpcServices().isSnapshotDone(null, builder.build());
+// assertTrue("Completed, on-disk snapshot not found", response.getDone());
+// }
+//
+// @Test(timeout = 300000)
+// public void testGetCompletedSnapshots() throws Exception {
+// // first check when there are no snapshots
+// GetCompletedSnapshotsRequest request = GetCompletedSnapshotsRequest.newBuilder().build();
+// GetCompletedSnapshotsResponse response =
+// master.getMasterRpcServices().getCompletedSnapshots(null, request);
+// assertEquals("Found unexpected number of snapshots", 0, response.getSnapshotsCount());
+//
+// // write one snapshot to the fs
+// String snapshotName = "completed";
+// Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+// SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build();
+// SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, snapshotDir, fs);
+//
+// // check that we get one snapshot
+// response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
+// assertEquals("Found unexpected number of snapshots", 1, response.getSnapshotsCount());
+// List<SnapshotDescription> snapshots = response.getSnapshotsList();
+// List<SnapshotDescription> expected = Lists.newArrayList(snapshot);
+// assertEquals("Returned snapshots don't match created snapshots", expected, snapshots);
+//
+// // write a second snapshot
+// snapshotName = "completed_two";
+// snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+// snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build();
+// SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, snapshotDir, fs);
+// expected.add(snapshot);
+//
+// // check that we now get two snapshots
+// response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
+// assertEquals("Found unexpected number of snapshots", 2, response.getSnapshotsCount());
+// snapshots = response.getSnapshotsList();
+// assertEquals("Returned snapshots don't match created snapshots", expected, snapshots);
+// }
+//
+// @Test(timeout = 300000)
+// public void testDeleteSnapshot() throws Exception {
+//
+// String snapshotName = "completed";
+// SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build();
+//
+// DeleteSnapshotRequest request = DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot)
+// .build();
+// try {
+// master.getMasterRpcServices().deleteSnapshot(null, request);
+// fail("Master didn't throw exception when attempting to delete snapshot that doesn't exist");
+// } catch (ServiceException e) {
+// LOG.debug("Correctly failed delete of non-existent snapshot: " + e.getMessage());
+// }
+//
+// // write one snapshot to the fs
+// Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+// SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, snapshotDir, fs);
+//
+// // then delete the existing snapshot, which shouldn't cause an exception to be thrown
+// master.getMasterRpcServices().deleteSnapshot(null, request);
+// }
+
+ /**
+ * Test that the snapshot hfile archive cleaner works correctly. HFiles that are in snapshots
+ * should be retained, while those that are not in a snapshot should be deleted.
+ * @throws Exception on failure
+ */
+ @Test(timeout = 300000)
+ public void testSnapshotHFileArchiving() throws Exception {
+ Admin admin = UTIL.getHBaseAdmin();
+ // make sure we don't fail on listing snapshots
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // recreate the test table with compactions disabled; otherwise a compaction
+ // may run before the snapshot, making the post-snapshot compaction a no-op
+ // and failing the checks below
+ UTIL.deleteTable(TABLE_NAME);
+ HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
+ htd.setCompactionEnabled(false);
+ UTIL.createTable(htd, new byte[][] { TEST_FAM }, null);
+
+ // load the table
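+ // each load+flush pair adds one store file per region: six files exceeds the
+ // compaction threshold of 5 while staying under the blocking limit of 12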
+ for (int i = 0; i < blockingStoreFiles / 2; i++) {
+ UTIL.loadTable(UTIL.getConnection().getTable(TABLE_NAME), TEST_FAM);
+ UTIL.flush(TABLE_NAME);
+ }
+
+ // disable the table so we can take a snapshot
+ admin.disableTable(TABLE_NAME);
+ htd.setCompactionEnabled(true);
+
+ // take a snapshot of the table
+ String snapshotName = "snapshot";
+ byte[] snapshotNameBytes = Bytes.toBytes(snapshotName);
+ admin.snapshot(snapshotNameBytes, TABLE_NAME);
+
+ LOG.info("After snapshot File-System state");
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+ // ensure we only have one snapshot
+ SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshotNameBytes, TABLE_NAME);
+
+ // enable compactions now
+ admin.modifyTable(TABLE_NAME, htd);
+
+ // re-enable the table so we can compact the regions
+ admin.enableTable(TABLE_NAME);
+
+ // compact the files so we get some archived files for the table we just snapshotted
+ List<HRegion> regions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ for (HRegion region : regions) {
+ region.waitForFlushesAndCompactions(); // enable can trigger a compaction, wait for it.
+ region.compactStores(); // min is 2 so will compact and archive
+ }
+ List<RegionServerThread> regionServerThreads = UTIL.getMiniHBaseCluster()
+ .getRegionServerThreads();
+ HRegionServer hrs = null;
+ for (RegionServerThread rs : regionServerThreads) {
+ if (!rs.getRegionServer().getOnlineRegions(TABLE_NAME).isEmpty()) {
+ hrs = rs.getRegionServer();
+ break;
+ }
+ }
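+ // the discharger moves store files that compaction has replaced out of the
+ // live region directories into the archive, where the HFile cleaner chain
+ // (including SnapshotHFileCleaner) decides whether they can be deleted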
+ CompactedHFilesDischarger cleaner = new CompactedHFilesDischarger(100, null, hrs, false);
+ cleaner.chore();
+ LOG.info("After compaction File-System state");
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+ // make sure the cleaner has run
+ LOG.debug("Running hfile cleaners");
+ ensureHFileCleanersRun();
+ LOG.info("After cleaners File-System state: " + rootDir);
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+ // get the snapshot files for the table
+ Path snapshotTable = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+ Set<String> snapshotHFiles = SnapshotReferenceUtil.getHFileNames(
+ UTIL.getConfiguration(), fs, snapshotTable);
+ // check that the files in the archive contain the ones that we need for the snapshot
+ LOG.debug("Have snapshot hfiles:");
+ for (String fileName : snapshotHFiles) {
+ LOG.debug(fileName);
+ }
+ // get the archived files for the table
+ Collection<String> archives = getHFiles(archiveDir, fs, TABLE_NAME);
+
+ // get the hfiles for the table
+ Collection<String> hfiles = getHFiles(rootDir, fs, TABLE_NAME);
+
+ // and make sure that there is a proper subset
+ for (String fileName : snapshotHFiles) {
+ boolean exist = archives.contains(fileName) || hfiles.contains(fileName);
+ assertTrue("Archived hfiles " + archives
+ + " and table hfiles " + hfiles + " are missing snapshot file: " + fileName, exist);
+ }
+
+ // delete the existing snapshot
+ admin.deleteSnapshot(snapshotNameBytes);
+ SnapshotTestingUtils.assertNoSnapshots(admin);
+
+ // make sure we don't keep around hfiles that are no longer in any snapshot;
+ // force a refresh instead of waiting for the snapshot hfile cache to expire
+ List<BaseHFileCleanerDelegate> delegates = UTIL.getHFileCleanerChore().cleanersChain;
+ for (BaseHFileCleanerDelegate delegate: delegates) {
+ if (delegate instanceof SnapshotHFileCleaner) {
+ ((SnapshotHFileCleaner)delegate).getFileCacheForTesting().triggerCacheRefreshForTesting();
+ }
+ }
+ // run the cleaner again
+ LOG.debug("Running hfile cleaners");
+ ensureHFileCleanersRun();
+ LOG.info("After delete snapshot cleaners run File-System state");
+ FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+ archives = getHFiles(archiveDir, fs, TABLE_NAME);
+ assertEquals("Still have hfiles in the archive even though their snapshot was deleted.", 0,
+ archives.size());
+ }
+
+ /**
+ * @return all the HFiles for a given table in the specified dir
+ * @throws IOException on expected failure
+ */
+ private Collection<String> getHFiles(Path dir, FileSystem fs, TableName tableName)
+ throws IOException {
+ Path tableDir = FSUtils.getTableDir(dir, tableName);
+ return SnapshotTestingUtils.listHFileNames(fs, tableDir);
+ }
+
+ /**
+ * Make sure the {@link HFileCleaner HFileCleaners} run at least once
+ */
+ private static void ensureHFileCleanersRun() {
+ UTIL.getHFileCleanerChore().chore();
+ }
+}