Posted to commits@hbase.apache.org by bu...@apache.org on 2016/11/03 21:19:35 UTC

[4/5] hbase git commit: HBASE-16957 Moved cleaners from master package to fs.legacy package and removed filesystem/ directory layout references from a few files in master package

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index ab058da..7b94cb9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -46,9 +46,6 @@ import java.util.concurrent.locks.ReentrantLock;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
@@ -69,6 +66,8 @@ import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.ipc.FailedServerException;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
@@ -83,13 +82,11 @@ import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils;
@@ -518,21 +515,17 @@ public class AssignmentManager {
       // if they don't have any WALs, this restart should be considered as a clean one
       Set<ServerName> queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
       if (!queuedDeadServers.isEmpty()) {
-        Configuration conf = server.getConfiguration();
-        Path rootdir = FSUtils.getRootDir(conf);
-        FileSystem fs = rootdir.getFileSystem(conf);
+        MasterStorage<? extends StorageIdentifier> ms = server.getMasterStorage();
         for (ServerName serverName: queuedDeadServers) {
           // In the case of a clean exit, the shutdown handler would have presplit any WALs and
           // removed empty directories.
-          Path logDir = new Path(rootdir,
-            AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
-          Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
-          if (checkWals(fs, logDir) || checkWals(fs, splitDir)) {
+          if (ms.hasWALs(serverName.toShortString())) {
             LOG.debug("Found queued dead server " + serverName);
             failover = true;
             break;
           }
         }
+
         if (!failover) {
           // We figured that it's not a failover, so no need to
           // work on these re-queued dead servers any more.
@@ -592,33 +585,6 @@ public class AssignmentManager {
     return failover;
   }
 
-  private boolean checkWals(FileSystem fs, Path dir) throws IOException {
-    if (!fs.exists(dir)) {
-      LOG.debug(dir + " doesn't exist");
-      return false;
-    }
-    if (!fs.getFileStatus(dir).isDirectory()) {
-      LOG.warn(dir + " is not a directory");
-      return false;
-    }
-    FileStatus[] files = FSUtils.listStatus(fs, dir);
-    if (files == null || files.length == 0) {
-      LOG.debug(dir + " has no files");
-      return false;
-    }
-    for (int i = 0; i < files.length; i++) {
-      if (files[i].isFile() && files[i].getLen() > 0) {
-        LOG.debug(dir + " has a non-empty file: " + files[i].getPath());
-        return true;
-      } else if (files[i].isDirectory() && checkWals(fs, dir)) {
-        LOG.debug(dir + " is a directory and has a non-empty file: " + files[i].getPath());
-        return true;
-      }
-    }
-    LOG.debug("Found 0 non-empty wal files for :" + dir);
-    return false;
-  }
-
   /**
    * When a region is closed, it should be removed from the regionsToReopen
    * @param hri HRegionInfo of the region which was closed

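Note (not part of the patch): the hunk above replaces AssignmentManager's per-directory WAL scan (the removed checkWals helper) with a single probe on the new storage abstraction. The following is a minimal sketch of that call pattern, assuming only the MasterStorage/StorageIdentifier types and the hasWALs(String) call shown in this diff; the helper class, its name, and the checked IOException are illustrative assumptions, not code from the patch.

    import java.io.IOException;
    import java.util.Set;

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.fs.MasterStorage;
    import org.apache.hadoop.hbase.fs.StorageIdentifier;

    /** Illustrative helper: decide "failover vs. clean restart" by probing for leftover WALs. */
    final class FailoverCheckSketch {

      private FailoverCheckSketch() {}

      /**
       * Returns true if any queued dead server still has WAL data, i.e. the restart
       * should be treated as a failover rather than a clean start.
       */
      static boolean isFailover(MasterStorage<? extends StorageIdentifier> storage,
          Set<ServerName> queuedDeadServers) throws IOException {
        for (ServerName serverName : queuedDeadServers) {
          // The storage layer owns the WAL directory layout; callers no longer build
          // Path objects or talk to the FileSystem directly, as the removed code did.
          if (storage.hasWALs(serverName.toShortString())) {
            return true;
          }
        }
        return false;
      }
    }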
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 9181579..6b27056 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -99,8 +99,6 @@ import org.apache.hadoop.hbase.master.balancer.BalancerChore;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
 import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
-import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.cleaner.ReplicationMetaCleaner;
 import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
 import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
@@ -150,7 +148,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EncryptionTest;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.IdLock;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -312,8 +309,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   CatalogJanitor catalogJanitorChore;
   private ReplicationMetaCleaner replicationMetaCleaner;
-  private LogCleaner logCleaner;
-  private HFileCleaner hfileCleaner;
+  private Iterable<ScheduledChore> storageManagerChores = new ArrayList<>();
   private ExpiredMobFileCleanerChore expiredMobFileCleanerChore;
   private MobCompactionChore mobCompactChore;
   private MasterMobCompactionThread mobCompactThread;
@@ -961,26 +957,22 @@ public class HMaster extends HRegionServer implements MasterServices {
    this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
    startProcedureExecutor();
 
-   // Start log cleaner thread
-   int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
-   this.logCleaner =
-      new LogCleaner(cleanerInterval,
-         this, conf, getMasterWalManager().getFileSystem(),
-         getMasterWalManager().getOldLogDir());
-    getChoreService().scheduleChore(logCleaner);
-
-   //start the hfile archive cleaner thread
-    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
-    Map<String, Object> params = new HashMap<String, Object>();
+    // Start storage chores
+    Map<String, Object> params = new HashMap<>(1);
     params.put(MASTER, this);
-    this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf, getMasterStorage()
-        .getFileSystem(), archiveDir, params);
-    getChoreService().scheduleChore(hfileCleaner);
+    storageManagerChores = storageManager.getChores(this, params);
+    for (ScheduledChore chore: storageManagerChores) {
+      LOG.info("Starting storage chore: '" + chore.getName() + "'.");
+      getChoreService().scheduleChore(chore);
+    }
+
+   // Start log cleaner thread
     serviceStarted = true;
     if (LOG.isTraceEnabled()) {
       LOG.trace("Started service threads");
     }
 
+    int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
     replicationMetaCleaner = new ReplicationMetaCleaner(this, this, cleanerInterval);
     getChoreService().scheduleChore(replicationMetaCleaner);
   }
@@ -1014,8 +1006,9 @@ public class HMaster extends HRegionServer implements MasterServices {
       LOG.debug("Stopping service threads");
     }
     // Clean up and close up shop
-    if (this.logCleaner != null) this.logCleaner.cancel(true);
-    if (this.hfileCleaner != null) this.hfileCleaner.cancel(true);
+    for (ScheduledChore chore : storageManagerChores) {
+      if(chore != null) chore.cancel(true);
+    }
     if (this.replicationMetaCleaner != null) this.replicationMetaCleaner.cancel(true);
     if (this.quotaManager != null) this.quotaManager.stop();
     if (this.activeMasterManager != null) this.activeMasterManager.stop();
@@ -2453,10 +2446,6 @@ public class HMaster extends HRegionServer implements MasterServices {
     new HMasterCommandLine(HMaster.class).doMain(args);
   }
 
-  public HFileCleaner getHFileCleaner() {
-    return this.hfileCleaner;
-  }
-
   /**
    * @return the underlying snapshot manager
    */

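Note (not part of the patch): in HMaster the dedicated LogCleaner/HFileCleaner fields give way to a generic set of chores supplied by the storage manager. A minimal sketch of that lifecycle is below; the MasterStorageLike interface, the class name, and the literal "master" params key are assumptions standing in for types and constants not shown in this hunk, while ChoreService, ScheduledChore, scheduleChore() and cancel(boolean) are existing HBase API.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;

    /** Illustrative lifecycle for storage-provided chores (stand-in names, see note above). */
    class StorageChoreLifecycleSketch {

      private final ChoreService choreService = new ChoreService("sketch");
      private Iterable<ScheduledChore> storageChores = new ArrayList<>();

      /** Mirrors the startServiceThreads() hunk: fetch chores from storage and schedule them. */
      void start(MasterStorageLike storage, Object master) {
        Map<String, Object> params = new HashMap<>(1);
        params.put("master", master); // the patch uses the MASTER constant; this literal key is assumed
        storageChores = storage.getChores(master, params);
        for (ScheduledChore chore : storageChores) {
          choreService.scheduleChore(chore);
        }
      }

      /** Mirrors stopServiceThreads(): cancel whatever chores the storage layer handed out. */
      void stop() {
        for (ScheduledChore chore : storageChores) {
          if (chore != null) {
            chore.cancel(true);
          }
        }
      }

      /** Stand-in for the getChores() contract used above; the real signature is not shown here. */
      interface MasterStorageLike {
        Iterable<ScheduledChore> getChores(Object stopper, Map<String, Object> params);
      }
    }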
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
index 1def1f8..d7c28c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 
 /**
@@ -92,7 +91,7 @@ public class MasterStatusServlet extends HttpServlet {
     boolean showFragmentation = conf.getBoolean(
         "hbase.master.ui.fragmentation.enabled", false);
     if (showFragmentation) {
-      return FSUtils.getTableFragmentation(master);
+      return master.getMasterStorage().getTableFragmentation();
     } else {
       return null;
     }

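Note (not part of the patch): the servlet change swaps the static FSUtils helper for a call through the master's storage abstraction. A minimal sketch of the resulting call path follows; the stand-in interfaces and the Map<String, Integer> return type are assumptions (the hunk does not show the return type), kept consistent with how the old FSUtils.getTableFragmentation() result was used.

    import java.io.IOException;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;

    /** Illustrative: fragmentation lookup goes through MasterStorage instead of FSUtils. */
    final class FragmentationSketch {

      /** Stand-in for the slice of HMaster used here. */
      interface MasterLike {
        StorageLike getMasterStorage();
      }

      /** Stand-in for the getTableFragmentation() contract assumed above. */
      interface StorageLike {
        Map<String, Integer> getTableFragmentation() throws IOException;
      }

      static Map<String, Integer> getFragmentationInfo(MasterLike master, Configuration conf)
          throws IOException {
        boolean showFragmentation =
            conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
        // Only compute fragmentation when the UI flag is enabled; otherwise skip the storage scan.
        return showFragmentation ? master.getMasterStorage().getTableFragmentation() : null;
      }
    }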
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
deleted file mode 100644
index 891db22..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.hbase.BaseConfigurable;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
-
-import java.util.Map;
-
-/**
- * Base class for file cleaners which allows subclasses to implement a simple
- * isFileDeletable method (which used to be the FileCleanerDelegate contract).
- */
-public abstract class BaseFileCleanerDelegate extends BaseConfigurable
-implements FileCleanerDelegate {
-
-  @Override
-  public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
-    return Iterables.filter(files, new Predicate<FileStatus>() {
-      @Override
-      public boolean apply(FileStatus file) {
-        return isFileDeletable(file);
-      }});
-  }
-
-  @Override
-  public void init(Map<String, Object> params) {
-    // subclass could override it if needed.
-  }
-
-  /**
-   * Should the master delete the file or keep it?
-   * @param fStat file status of the file to check
-   * @return <tt>true</tt> if the file is deletable, <tt>false</tt> if not
-   */
-  protected abstract boolean isFileDeletable(FileStatus fStat);
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
deleted file mode 100644
index d2330f9..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * Base class for the hfile cleaning function inside the master. By default, only the
- * {@link TimeToLiveHFileCleaner} is called.
- * <p>
- * If other effects are needed, implement your own LogCleanerDelegate and add it to the
- * configuration "hbase.master.hfilecleaner.plugins", which is a comma-separated list of fully
- * qualified class names. The <code>HFileCleaner</code> will build the cleaner chain in
- * order the order specified by the configuration.
- * </p>
- * <p>
- * For subclasses, setConf will be called exactly <i>once</i> before using the cleaner.
- * </p>
- * <p>
- * Since {@link BaseHFileCleanerDelegate HFileCleanerDelegates} are created in
- * HFileCleaner by reflection, classes that implements this interface <b>must</b>
- * provide a default constructor.
- * </p>
- */
-@InterfaceAudience.Private
-public abstract class BaseHFileCleanerDelegate extends BaseFileCleanerDelegate {
-
-  private boolean stopped = false;
-
-  @Override
-  public void stop(String why) {
-    this.stopped = true;
-  }
-
-  @Override
-  public boolean isStopped() {
-    return this.stopped;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseLogCleanerDelegate.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseLogCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseLogCleanerDelegate.java
deleted file mode 100644
index 32e2a7b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseLogCleanerDelegate.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileStatus;
-
-/**
- * Base class for the log cleaning function inside the master. By default, two
- * cleaners: <code>TimeToLiveLogCleaner</code> and
- * <code>ReplicationLogCleaner</code> are called in order. So if other effects
- * are needed, implement your own LogCleanerDelegate and add it to the
- * configuration "hbase.master.logcleaner.plugins", which is a comma-separated
- * list of fully qualified class names. LogsCleaner will add it to the chain.
- * <p>
- * HBase ships with LogsCleaner as the default implementation.
- * <p>
- * This interface extends Configurable, so setConf needs to be called once
- * before using the cleaner. Since LogCleanerDelegates are created in
- * LogsCleaner by reflection. Classes that implements this interface should
- * provide a default constructor.
- */
-@InterfaceAudience.Private
-public abstract class BaseLogCleanerDelegate extends BaseFileCleanerDelegate {
-
-  @Override
-  public boolean isFileDeletable(FileStatus fStat) {
-    return isLogDeletable(fStat);
-  }
-
-  /**
-   * Should the master delete the log or keep it?
-   * <p>
-   * Implementing classes should override {@link #isFileDeletable(FileStatus)} instead.
-   * @param fStat file status of the file
-   * @return true if the log is deletable, false (default) if not
-   */
-  @Deprecated
-  public boolean isLogDeletable(FileStatus fStat) {
-    return false;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
deleted file mode 100644
index b094507..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
+++ /dev/null
@@ -1,294 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ScheduledChore;
-import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.ipc.RemoteException;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-/**
- * Abstract Cleaner that uses a chain of delegates to clean a directory of files
- * @param <T> Cleaner delegate class that is dynamically loaded from configuration
- */
-public abstract class CleanerChore<T extends FileCleanerDelegate> extends ScheduledChore {
-
-  private static final Log LOG = LogFactory.getLog(CleanerChore.class.getName());
-
-  private final FileSystem fs;
-  private final Path oldFileDir;
-  private final Configuration conf;
-  protected List<T> cleanersChain;
-  protected Map<String, Object> params;
-
-  public CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
-                      FileSystem fs, Path oldFileDir, String confKey) {
-    this(name, sleepPeriod, s, conf, fs, oldFileDir, confKey, null);
-  }
-
-  /**
-   * @param name name of the chore being run
-   * @param sleepPeriod the period of time to sleep between each run
-   * @param s the stopper
-   * @param conf configuration to use
-   * @param fs handle to the FS
-   * @param oldFileDir the path to the archived files
-   * @param confKey configuration key for the classes to instantiate
-   * @param params members could be used in cleaner
-   */
-  public CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
-      FileSystem fs, Path oldFileDir, String confKey, Map<String, Object> params) {
-    super(name, s, sleepPeriod);
-    this.fs = fs;
-    this.oldFileDir = oldFileDir;
-    this.conf = conf;
-    this.params = params;
-    initCleanerChain(confKey);
-  }
-
-
-  /**
-   * Validate the file to see if it even belongs in the directory. If it is valid, then the file
-   * will go through the cleaner delegates, but otherwise the file is just deleted.
-   * @param file full {@link Path} of the file to be checked
-   * @return <tt>true</tt> if the file is valid, <tt>false</tt> otherwise
-   */
-  protected abstract boolean validate(Path file);
-
-  /**
-   * Instantiate and initialize all the file cleaners set in the configuration
-   * @param confKey key to get the file cleaner classes from the configuration
-   */
-  private void initCleanerChain(String confKey) {
-    this.cleanersChain = new LinkedList<T>();
-    String[] logCleaners = conf.getStrings(confKey);
-    if (logCleaners != null) {
-      for (String className : logCleaners) {
-        T logCleaner = newFileCleaner(className, conf);
-        if (logCleaner != null) {
-          LOG.debug("initialize cleaner=" + className);
-          this.cleanersChain.add(logCleaner);
-        }
-      }
-    }
-  }
-
-  /**
-   * A utility method to create new instances of LogCleanerDelegate based on the class name of the
-   * LogCleanerDelegate.
-   * @param className fully qualified class name of the LogCleanerDelegate
-   * @param conf
-   * @return the new instance
-   */
-  private T newFileCleaner(String className, Configuration conf) {
-    try {
-      Class<? extends FileCleanerDelegate> c = Class.forName(className).asSubclass(
-        FileCleanerDelegate.class);
-      @SuppressWarnings("unchecked")
-      T cleaner = (T) c.newInstance();
-      cleaner.setConf(conf);
-      cleaner.init(this.params);
-      return cleaner;
-    } catch (Exception e) {
-      LOG.warn("Can NOT create CleanerDelegate: " + className, e);
-      // skipping if can't instantiate
-      return null;
-    }
-  }
-
-  @Override
-  protected void chore() {
-    try {
-      FileStatus[] files = FSUtils.listStatus(this.fs, this.oldFileDir);
-      checkAndDeleteEntries(files);
-    } catch (IOException e) {
-      e = e instanceof RemoteException ?
-              ((RemoteException)e).unwrapRemoteException() : e;
-      LOG.warn("Error while cleaning the logs", e);
-    }
-  }
-
-  /**
-   * Loop over the given directory entries, and check whether they can be deleted.
-   * If an entry is itself a directory it will be recursively checked and deleted itself iff
-   * all subentries are deleted (and no new subentries are added in the mean time)
-   *
-   * @param entries directory entries to check
-   * @return true if all entries were successfully deleted
-   */
-  private boolean checkAndDeleteEntries(FileStatus[] entries) {
-    if (entries == null) {
-      return true;
-    }
-    boolean allEntriesDeleted = true;
-    List<FileStatus> files = Lists.newArrayListWithCapacity(entries.length);
-    for (FileStatus child : entries) {
-      Path path = child.getPath();
-      if (child.isDirectory()) {
-        // for each subdirectory delete it and all entries if possible
-        if (!checkAndDeleteDirectory(path)) {
-          allEntriesDeleted = false;
-        }
-      } else {
-        // collect all files to attempt to delete in one batch
-        files.add(child);
-      }
-    }
-    if (!checkAndDeleteFiles(files)) {
-      allEntriesDeleted = false;
-    }
-    return allEntriesDeleted;
-  }
-  
-  /**
-   * Attempt to delete a directory and all files under that directory. Each child file is passed
-   * through the delegates to see if it can be deleted. If the directory has no children when the
-   * cleaners have finished it is deleted.
-   * <p>
-   * If new children files are added between checks of the directory, the directory will <b>not</b>
-   * be deleted.
-   * @param dir directory to check
-   * @return <tt>true</tt> if the directory was deleted, <tt>false</tt> otherwise.
-   */
-  @VisibleForTesting boolean checkAndDeleteDirectory(Path dir) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Checking directory: " + dir);
-    }
-
-    try {
-      FileStatus[] children = FSUtils.listStatus(fs, dir);
-      boolean allChildrenDeleted = checkAndDeleteEntries(children);
-  
-      // if the directory still has children, we can't delete it, so we are done
-      if (!allChildrenDeleted) return false;
-    } catch (IOException e) {
-      e = e instanceof RemoteException ?
-              ((RemoteException)e).unwrapRemoteException() : e;
-      LOG.warn("Error while listing directory: " + dir, e);
-      // couldn't list directory, so don't try to delete, and don't return success
-      return false;
-    }
-
-    // otherwise, all the children (that we know about) have been deleted, so we should try to
-    // delete this directory. However, don't do so recursively so we don't delete files that have
-    // been added since we last checked.
-    try {
-      return fs.delete(dir, false);
-    } catch (IOException e) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Couldn't delete directory: " + dir, e);
-      }
-      // couldn't delete w/o exception, so we can't return success.
-      return false;
-    }
-  }
-
-  /**
-   * Run the given files through each of the cleaners to see if it should be deleted, deleting it if
-   * necessary.
-   * @param files List of FileStatus for the files to check (and possibly delete)
-   * @return true iff successfully deleted all files
-   */
-  private boolean checkAndDeleteFiles(List<FileStatus> files) {
-    // first check to see if the path is valid
-    List<FileStatus> validFiles = Lists.newArrayListWithCapacity(files.size());
-    List<FileStatus> invalidFiles = Lists.newArrayList();
-    for (FileStatus file : files) {
-      if (validate(file.getPath())) {
-        validFiles.add(file);
-      } else {
-        LOG.warn("Found a wrongly formatted file: " + file.getPath() + " - will delete it.");
-        invalidFiles.add(file);
-      }
-    }
-
-    Iterable<FileStatus> deletableValidFiles = validFiles;
-    // check each of the cleaners for the valid files
-    for (T cleaner : cleanersChain) {
-      if (cleaner.isStopped() || this.getStopper().isStopped()) {
-        LOG.warn("A file cleaner" + this.getName() + " is stopped, won't delete any more files in:"
-            + this.oldFileDir);
-        return false;
-      }
-
-      Iterable<FileStatus> filteredFiles = cleaner.getDeletableFiles(deletableValidFiles);
-      
-      // trace which cleaner is holding on to each file
-      if (LOG.isTraceEnabled()) {
-        ImmutableSet<FileStatus> filteredFileSet = ImmutableSet.copyOf(filteredFiles);
-        for (FileStatus file : deletableValidFiles) {
-          if (!filteredFileSet.contains(file)) {
-            LOG.trace(file.getPath() + " is not deletable according to:" + cleaner);
-          }
-        }
-      }
-      
-      deletableValidFiles = filteredFiles;
-    }
-    
-    Iterable<FileStatus> filesToDelete = Iterables.concat(invalidFiles, deletableValidFiles);
-    int deletedFileCount = 0;
-    for (FileStatus file : filesToDelete) {
-      Path filePath = file.getPath();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing: " + filePath + " from archive");
-      }
-      try {
-        boolean success = this.fs.delete(filePath, false);
-        if (success) {
-          deletedFileCount++;
-        } else {
-          LOG.warn("Attempted to delete:" + filePath
-              + ", but couldn't. Run cleaner chain and attempt to delete on next pass.");
-        }
-      } catch (IOException e) {
-        e = e instanceof RemoteException ?
-                  ((RemoteException)e).unwrapRemoteException() : e;
-        LOG.warn("Error while deleting: " + filePath, e);
-      }
-    }
-
-    return deletedFileCount == files.size();
-  }
-
-  @Override
-  public void cleanup() {
-    for (T lc : this.cleanersChain) {
-      try {
-        lc.stop("Exiting");
-      } catch (Throwable t) {
-        LOG.warn("Stopping", t);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java
deleted file mode 100644
index 7a15b96..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.hbase.Stoppable;
-
-import java.util.Map;
-
-/**
- * General interface for cleaning files from a folder (generally an archive or
- * backup folder). These are chained via the {@link CleanerChore} to determine
- * if a given file should be deleted.
- */
-@InterfaceAudience.Private
-public interface FileCleanerDelegate extends Configurable, Stoppable {
-
-  /**
-   * Determines which of the given files are safe to delete
-   * @param files files to check for deletion
-   * @return files that are ok to delete according to this cleaner
-   */
-  Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files);
-
-
-  /**
-   * this method is used to pass some instance into subclass
-   * */
-  void init(Map<String, Object> params);
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
deleted file mode 100644
index 89c316b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-/**
- * This Chore, every time it runs, will clear the HFiles in the hfile archive
- * folder that are deletable for each HFile cleaner in the chain.
- */
-@InterfaceAudience.Private
-public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
-
-  public static final String MASTER_HFILE_CLEANER_PLUGINS = "hbase.master.hfilecleaner.plugins";
-
-  public HFileCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs,
-      Path directory) {
-    this(period, stopper, conf, fs, directory, null);
-  }
-
-  /**
-   * @param period the period of time to sleep between each run
-   * @param stopper the stopper
-   * @param conf configuration to use
-   * @param fs handle to the FS
-   * @param directory directory to be cleaned
-   * @param params params could be used in subclass of BaseHFileCleanerDelegate
-   */
-  public HFileCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs,
-                      Path directory, Map<String, Object> params) {
-    super("HFileCleaner", period, stopper, conf, fs,
-      directory, MASTER_HFILE_CLEANER_PLUGINS, params);
-  }
-
-  @Override
-  protected boolean validate(Path file) {
-    if (HFileLink.isBackReferencesDir(file) || HFileLink.isBackReferencesDir(file.getParent())) {
-      return true;
-    }
-    return StoreFileInfo.validateStoreFileName(file.getName());
-  }
-
-  /**
-   * Exposed for TESTING!
-   */
-  public List<BaseHFileCleanerDelegate> getDelegatesForTesting() {
-    return this.cleanersChain;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
deleted file mode 100644
index 328a269..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileLinkCleaner.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * HFileLink cleaner that determines if a hfile should be deleted.
- * HFiles can be deleted only if there're no links to them.
- *
- * When a HFileLink is created a back reference file is created in:
- *      /hbase/archive/table/region/cf/.links-hfile/ref-region.ref-table
- * To check if the hfile can be deleted the back references folder must be empty.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class HFileLinkCleaner extends BaseHFileCleanerDelegate {
-  private static final Log LOG = LogFactory.getLog(HFileLinkCleaner.class);
-
-  private FileSystem fs = null;
-
-  @Override
-  public synchronized boolean isFileDeletable(FileStatus fStat) {
-    if (this.fs == null) return false;
-    Path filePath = fStat.getPath();
-    // HFile Link is always deletable
-    if (HFileLink.isHFileLink(filePath)) return true;
-
-    // If the file is inside a link references directory, means that it is a back ref link.
-    // The back ref can be deleted only if the referenced file doesn't exists.
-    Path parentDir = filePath.getParent();
-    if (HFileLink.isBackReferencesDir(parentDir)) {
-      Path hfilePath = null;
-      try {
-        // Also check if the HFile is in the HBASE_TEMP_DIRECTORY; this is where the referenced
-        // file gets created when cloning a snapshot.
-        hfilePath = HFileLink.getHFileFromBackReference(
-            new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY), filePath);
-        if (fs.exists(hfilePath)) {
-          return false;
-        }
-        // check whether the HFileLink still exists in mob dir.
-        hfilePath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(getConf()), filePath);
-        if (fs.exists(hfilePath)) {
-          return false;
-        }
-        hfilePath = HFileLink.getHFileFromBackReference(FSUtils.getRootDir(getConf()), filePath);
-        return !fs.exists(hfilePath);
-      } catch (IOException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Couldn't verify if the referenced file still exists, keep it just in case: "
-              + hfilePath);
-        }
-        return false;
-      }
-    }
-
-    // HFile is deletable only if has no links
-    Path backRefDir = null;
-    try {
-      backRefDir = HFileLink.getBackReferencesDir(parentDir, filePath.getName());
-      return FSUtils.listStatus(fs, backRefDir) == null;
-    } catch (IOException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Couldn't get the references, not deleting file, just in case. filePath="
-            + filePath + ", backRefDir=" + backRefDir);
-      }
-      return false;
-    }
-  }
-
-  @Override
-  public synchronized void setConf(Configuration conf) {
-    super.setConf(conf);
-
-    // setup filesystem
-    try {
-      this.fs = FileSystem.get(this.getConf());
-    } catch (IOException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Couldn't instantiate the file system, not deleting file, just in case. "
-            + FileSystem.FS_DEFAULT_NAME_KEY + "="
-            + getConf().get(FileSystem.FS_DEFAULT_NAME_KEY, FileSystem.DEFAULT_FS));
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
deleted file mode 100644
index 58da25a..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-
-/**
- * This Chore, every time it runs, will attempt to delete the WALs in the old logs folder. The WAL
- * is only deleted if none of the cleaner delegates says otherwise.
- * @see BaseLogCleanerDelegate
- */
-@InterfaceAudience.Private
-public class LogCleaner extends CleanerChore<BaseLogCleanerDelegate> {
-  private static final Log LOG = LogFactory.getLog(LogCleaner.class.getName());
-
-  /**
-   * @param p the period of time to sleep between each run
-   * @param s the stopper
-   * @param conf configuration to use
-   * @param fs handle to the FS
-   * @param oldLogDir the path to the archived logs
-   */
-  public LogCleaner(final int p, final Stoppable s, Configuration conf, FileSystem fs,
-      Path oldLogDir) {
-    super("LogsCleaner", p, s, conf, fs, oldLogDir, HBASE_MASTER_LOGCLEANER_PLUGINS);
-  }
-
-  @Override
-  protected boolean validate(Path file) {
-    return AbstractFSWALProvider.validateWALFilename(file.getName());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
deleted file mode 100644
index e821b2e..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-/**
- * HFile cleaner that uses the timestamp of the hfile to determine if it should be deleted. By
- * default they are allowed to live for {@value #DEFAULT_TTL}
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class TimeToLiveHFileCleaner extends BaseHFileCleanerDelegate {
-
-  private static final Log LOG = LogFactory.getLog(TimeToLiveHFileCleaner.class.getName());
-  public static final String TTL_CONF_KEY = "hbase.master.hfilecleaner.ttl";
-  // default ttl = 5 minutes
-  public static final long DEFAULT_TTL = 60000 * 5;
-  // Configured time a hfile can be kept after it was moved to the archive
-  private long ttl;
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.ttl = conf.getLong(TTL_CONF_KEY, DEFAULT_TTL);
-    super.setConf(conf);
-  }
-
-  @Override
-  public boolean isFileDeletable(FileStatus fStat) {
-    long currentTime = EnvironmentEdgeManager.currentTime();
-    long time = fStat.getModificationTime();
-    long life = currentTime - time;
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("HFile life:" + life + ", ttl:" + ttl + ", current:" + currentTime + ", from: "
-          + time);
-    }
-    if (life < 0) {
-      LOG.warn("Found a hfile (" + fStat.getPath() + ") newer than current time (" + currentTime
-          + " < " + time + "), probably a clock skew");
-      return false;
-    }
-    return life > ttl;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
deleted file mode 100644
index e46d6e1..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.cleaner;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-/**
- * Log cleaner that uses the timestamp of the wal to determine if it should
- * be deleted. By default they are allowed to live for 10 minutes.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class TimeToLiveLogCleaner extends BaseLogCleanerDelegate {
-  private static final Log LOG = LogFactory.getLog(TimeToLiveLogCleaner.class.getName());
-  // Configured time a log can be kept after it was closed
-  private long ttl;
-  private boolean stopped = false;
-
-  @Override
-  public boolean isLogDeletable(FileStatus fStat) {
-    long currentTime = EnvironmentEdgeManager.currentTime();
-    long time = fStat.getModificationTime();
-    long life = currentTime - time;
-    
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Log life:" + life + ", ttl:" + ttl + ", current:" + currentTime + ", from: "
-          + time);
-    }
-    if (life < 0) {
-      LOG.warn("Found a log (" + fStat.getPath() + ") newer than current time (" + currentTime
-          + " < " + time + "), probably a clock skew");
-      return false;
-    }
-    return life > ttl;
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    super.setConf(conf);
-    this.ttl = conf.getLong("hbase.master.logcleaner.ttl", 600000);
-  }
-
-
-  @Override
-  public void stop(String why) {
-    this.stopped = true;
-  }
-
-  @Override
-  public boolean isStopped() {
-    return this.stopped;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
deleted file mode 100644
index f80d962..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ /dev/null
@@ -1,389 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.snapshot;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.locks.ReentrantLock;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * Intelligently keep track of all the files for all the snapshots.
- * <p>
- * A cache of files is kept to avoid querying the {@link FileSystem} frequently. If there is a cache
- * miss the directory modification time is used to ensure that we don't rescan directories that we
- * already have in cache. We only check the modification times of the snapshot directories
- * (/hbase/.snapshot/[snapshot_name]) to determine if the files need to be loaded into the cache.
- * <p>
- * New snapshots will be added to the cache and deleted snapshots will be removed when we refresh
- * the cache. If the files underneath a snapshot directory are changed, but not the snapshot itself,
- * we will ignore updates to that snapshot's files.
- * <p>
- * This is sufficient because each snapshot has its own directory and is added via an atomic rename
- * <i>once</i>, when the snapshot is created. We don't need to worry about the data in the snapshot
- * being run.
- * <p>
- * Further, the cache is periodically refreshed ensure that files in snapshots that were deleted are
- * also removed from the cache.
- * <p>
- * A {@link SnapshotFileCache.SnapshotFileInspector} must be passed when creating <tt>this</tt> to
- * allow extraction of files under /hbase/.snapshot/[snapshot name] directory, for each snapshot.
- * This allows you to only cache files under, for instance, all the logs in the .logs directory or
- * all the files under all the regions.
- * <p>
- * <tt>this</tt> also considers all running snapshots (those under /hbase/.snapshot/.tmp) as valid
- * snapshots and will attempt to cache files from those snapshots as well.
- * <p>
- * Queries about a given file are thread-safe with respect to multiple queries and cache refreshes.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class SnapshotFileCache implements Stoppable {
-  interface SnapshotFileInspector {
-    /**
-     * Returns a collection of file names needed by the snapshot.
-     * @param snapshotDir {@link Path} to the snapshot directory to scan.
-     * @return the collection of file names needed by the snapshot.
-     */
-    Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException;
-  }
-
-  private static final Log LOG = LogFactory.getLog(SnapshotFileCache.class);
-  private volatile boolean stop = false;
-  private final FileSystem fs;
-  private final SnapshotFileInspector fileInspector;
-  private final Path snapshotDir;
-  private final Set<String> cache = new HashSet<String>();
-  /**
-   * This is a helper map of information about the snapshot directories so we don't need to rescan
-   * them if they haven't changed since the last time we looked.
-   */
-  private final Map<String, SnapshotDirectoryInfo> snapshots =
-      new HashMap<String, SnapshotDirectoryInfo>();
-  private final Timer refreshTimer;
-
-  private long lastModifiedTime = Long.MIN_VALUE;
-
-  /**
-   * Create a snapshot file cache for all snapshots under the specified [root]/.snapshot on the
-   * filesystem.
-   * <p>
-   * Immediately loads the file cache.
-   * @param conf to extract the configured {@link FileSystem} where the snapshots are stored and
-   *          hbase root directory
-   * @param cacheRefreshPeriod frequency (ms) with which the cache should be refreshed
-   * @param refreshThreadName name of the cache refresh thread
-   * @param inspectSnapshotFiles Filter to apply to each snapshot to extract the files.
-   * @throws IOException if the {@link FileSystem} or root directory cannot be loaded
-   */
-  public SnapshotFileCache(Configuration conf, long cacheRefreshPeriod, String refreshThreadName,
-      SnapshotFileInspector inspectSnapshotFiles) throws IOException {
-    this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf), 0, cacheRefreshPeriod,
-        refreshThreadName, inspectSnapshotFiles);
-  }
-
-  /**
-   * Create a snapshot file cache for all snapshots under the specified [root]/.snapshot on the
-   * filesystem
-   * @param fs {@link FileSystem} where the snapshots are stored
-   * @param rootDir hbase root directory
-   * @param cacheRefreshPeriod period (ms) with which the cache should be refreshed
-   * @param cacheRefreshDelay delay (ms) before the first cache refresh
-   * @param refreshThreadName name of the cache refresh thread
-   * @param inspectSnapshotFiles Filter to apply to each snapshot to extract the files.
-   */
-  public SnapshotFileCache(FileSystem fs, Path rootDir, long cacheRefreshPeriod,
-      long cacheRefreshDelay, String refreshThreadName, SnapshotFileInspector inspectSnapshotFiles) {
-    this.fs = fs;
-    this.fileInspector = inspectSnapshotFiles;
-    this.snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
-    // periodically refresh the file cache to make sure we aren't superfluously saving files.
-    this.refreshTimer = new Timer(refreshThreadName, true);
-    this.refreshTimer.scheduleAtFixedRate(new RefreshCacheTask(), cacheRefreshDelay,
-      cacheRefreshPeriod);
-  }
-
-  /**
-   * Trigger a cache refresh, even if it is before the next scheduled refresh. Does not affect
-   * pending cache refreshes.
-   * <p>
-   * Blocks until the cache is refreshed.
-   * <p>
-   * Exposed for TESTING.
-   */
-  public void triggerCacheRefreshForTesting() {
-    try {
-      SnapshotFileCache.this.refreshCache();
-    } catch (IOException e) {
-      LOG.warn("Failed to refresh snapshot hfile cache!", e);
-    }
-    LOG.debug("Current cache:" + cache);
-  }
-
-  /**
-   * Check to see if any of the passed file names is contained in any of the snapshots.
-   * First checks an in-memory cache of the files to keep. If a file is not in the cache, the
-   * cache is refreshed and then checked again for that file.
-   * This ensures that we never report a file as unreferenced while a snapshot still references it.
-   * <p>
-   * Note this may lead to periodic false positives for a file being referenced. Periodically, the
-   * cache is refreshed even if there are no requests, to ensure that the false positives are
-   * removed eventually. For instance, suppose a file in a snapshot gets loaded into the cache and
-   * that snapshot is later deleted. If the cache has not been refreshed at that point, the cache
-   * will still consider the file referenced, even though it no longer is (false positive).
-   * However, if the file was never part of a snapshot, we will never find it in the cache and it
-   * will always be reported as unreferenced.
-   * @param files files to check. NOTE: relies on the files already having been loaded from HDFS
-   *              before this method is called (NOT LAZY)
-   * @return <tt>unReferencedFiles</tt> the collection of files that do not have snapshot references
-   * @throws IOException if there is an unexpected error reaching the filesystem.
-   */
-  // XXX this is inefficient to synchronize on the method, when what we really need to guard against
-  // is an illegal access to the cache. Really we could do a mutex-guarded pointer swap on the
-  // cache, but that seems overkill at the moment and isn't necessarily a bottleneck.
-  public synchronized Iterable<FileStatus> getUnreferencedFiles(Iterable<FileStatus> files,
-      final SnapshotManager snapshotManager)
-      throws IOException {
-    List<FileStatus> unReferencedFiles = Lists.newArrayList();
-    List<String> snapshotsInProgress = null;
-    boolean refreshed = false;
-    for (FileStatus file : files) {
-      String fileName = file.getPath().getName();
-      if (!refreshed && !cache.contains(fileName)) {
-        refreshCache();
-        refreshed = true;
-      }
-      if (cache.contains(fileName)) {
-        continue;
-      }
-      if (snapshotsInProgress == null) {
-        snapshotsInProgress = getSnapshotsInProgress(snapshotManager);
-      }
-      if (snapshotsInProgress.contains(fileName)) {
-        continue;
-      }
-      unReferencedFiles.add(file);
-    }
-    return unReferencedFiles;
-  }
-
-  private synchronized void refreshCache() throws IOException {
-    long lastTimestamp = Long.MAX_VALUE;
-    boolean hasChanges = false;
-
-    // get the status of the snapshots directory and check if it has changed
-    try {
-      FileStatus dirStatus = fs.getFileStatus(snapshotDir);
-      lastTimestamp = dirStatus.getModificationTime();
-      hasChanges |= (lastTimestamp >= lastModifiedTime);
-    } catch (FileNotFoundException e) {
-      if (this.cache.size() > 0) {
-        LOG.error("Snapshot directory: " + snapshotDir + " doesn't exist");
-      }
-      return;
-    }
-
-    // get the status of the snapshots temporary directory and check if it has changed
-    // The top-level directory timestamp is not updated, so we have to check the inner directories.
-    try {
-      Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
-      FileStatus tempDirStatus = fs.getFileStatus(snapshotTmpDir);
-      lastTimestamp = Math.min(lastTimestamp, tempDirStatus.getModificationTime());
-      hasChanges |= (lastTimestamp >= lastModifiedTime);
-      if (!hasChanges) {
-        FileStatus[] tmpSnapshots = FSUtils.listStatus(fs, snapshotDir);
-        if (tmpSnapshots != null) {
-          for (FileStatus dirStatus: tmpSnapshots) {
-            lastTimestamp = Math.min(lastTimestamp, dirStatus.getModificationTime());
-          }
-          hasChanges |= (lastTimestamp >= lastModifiedTime);
-        }
-      }
-    } catch (FileNotFoundException e) {
-      // Nothing to do if the tmp dir doesn't exist
-    }
-
-    // if the snapshot directory wasn't modified since we last checked, we are done
-    if (!hasChanges) {
-      return;
-    }
-
-    // directory was modified, so we need to reload our cache
-    // there could be a slight race here where we miss the cache, check the directory modification
-    // time, then someone updates the directory, causing us to not scan the directory again.
-    // However, snapshot directories are only created once, so this isn't an issue.
-
-    // 1. update the modified time
-    this.lastModifiedTime = lastTimestamp;
-
-    // 2. clear the cache
-    this.cache.clear();
-    Map<String, SnapshotDirectoryInfo> known = new HashMap<String, SnapshotDirectoryInfo>();
-
-    // 3. check each of the snapshot directories
-    FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir);
-    if (snapshots == null) {
-      // remove all the remembered snapshots because we don't have any left
-      if (LOG.isDebugEnabled() && this.snapshots.size() > 0) {
-        LOG.debug("No snapshots on-disk, cache empty");
-      }
-      this.snapshots.clear();
-      return;
-    }
-
-    // 3.1 iterate through the on-disk snapshots
-    for (FileStatus snapshot : snapshots) {
-      String name = snapshot.getPath().getName();
-      // it's not the tmp dir
-      if (!name.equals(SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME)) {
-        SnapshotDirectoryInfo files = this.snapshots.remove(name);
-        // 3.1.1 if we don't know about the snapshot or it has been modified, we need to update the
-        // cached files. The latter can happen when a snapshot is created, deleted, and then
-        // recreated with the same name; we need to cache the information from the new snapshot,
-        // even though it has the same name, because the files it references have probably changed.
-        if (files == null || files.hasBeenModified(snapshot.getModificationTime())) {
-          // get all files for the snapshot and create a new info
-          Collection<String> storedFiles = fileInspector.filesUnderSnapshot(snapshot.getPath());
-          files = new SnapshotDirectoryInfo(snapshot.getModificationTime(), storedFiles);
-        }
-        // 3.2 add all the files to cache
-        this.cache.addAll(files.getFiles());
-        known.put(name, files);
-      }
-    }
-
-    // 4. set the snapshots we are tracking
-    this.snapshots.clear();
-    this.snapshots.putAll(known);
-  }
-
-  @VisibleForTesting List<String> getSnapshotsInProgress(
-    final SnapshotManager snapshotManager) throws IOException {
-    List<String> snapshotInProgress = Lists.newArrayList();
-    // only add those files to the cache, but not to the known snapshots
-    Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
-    FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
-    if (running != null) {
-      for (FileStatus run : running) {
-        ReentrantLock lock = null;
-        if (snapshotManager != null) {
-          lock = snapshotManager.getLocks().acquireLock(run.getPath().getName());
-        }
-        try {
-          snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
-        } catch (CorruptedSnapshotException e) {
-          // See HBASE-16464
-          if (e.getCause() instanceof FileNotFoundException) {
-            // If the snapshot is corrupt, we will delete it
-            fs.delete(run.getPath(), true);
-            LOG.warn("delete the " + run.getPath() + " due to exception:", e.getCause());
-          } else {
-            throw e;
-          }
-        } finally {
-          if (lock != null) {
-            lock.unlock();
-          }
-        }
-      }
-    }
-    return snapshotInProgress;
-  }
-
-  /**
-   * Simple helper task that just periodically attempts to refresh the cache
-   */
-  public class RefreshCacheTask extends TimerTask {
-    @Override
-    public void run() {
-      try {
-        SnapshotFileCache.this.refreshCache();
-      } catch (IOException e) {
-        LOG.warn("Failed to refresh snapshot hfile cache!", e);
-      }
-    }
-  }
-
-  @Override
-  public void stop(String why) {
-    if (!this.stop) {
-      this.stop = true;
-      this.refreshTimer.cancel();
-    }
-
-  }
-
-  @Override
-  public boolean isStopped() {
-    return this.stop;
-  }
-
-  /**
-   * Information about a snapshot directory
-   */
-  private static class SnapshotDirectoryInfo {
-    long lastModified;
-    Collection<String> files;
-
-    public SnapshotDirectoryInfo(long mtime, Collection<String> files) {
-      this.lastModified = mtime;
-      this.files = files;
-    }
-
-    /**
-     * @return the hfiles in the snapshot when <tt>this</tt> was made.
-     */
-    public Collection<String> getFiles() {
-      return this.files;
-    }
-
-    /**
-     * Check if the snapshot directory has been modified
-     * @param mtime current modification time of the directory
-     * @return <tt>true</tt> if the modification time of the directory is newer than when
-     *         <tt>this</tt> was created
-     */
-    public boolean hasBeenModified(long mtime) {
-      return this.lastModified < mtime;
-    }
-  }
-}

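For readers tracing the cleaner logic, the deleted class above is used by handing it a SnapshotFileInspector and then asking which candidate files no snapshot references. Below is a minimal usage sketch, mirroring the constructor and inspector shown above and the inspector body from SnapshotHFileCleaner further down; the helper class, its name, and its inputs (fs, rootDir, conf, the candidate list, the SnapshotManager) are assumed for illustration, and the class is assumed to sit in the same package as SnapshotFileCache so no extra import is needed for it.

  import java.io.IOException;
  import java.util.Collection;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
  import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;

  public class SnapshotFileCacheUsageSketch {
    /** Returns the subset of 'candidates' that no completed or in-progress snapshot references. */
    static Iterable<FileStatus> unreferenced(final Configuration conf, final FileSystem fs,
        Path rootDir, Iterable<FileStatus> candidates, SnapshotManager snapshotManager)
        throws IOException {
      // Refresh the cache every 5 minutes, listing the hfiles referenced by each snapshot.
      SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, 300000, 300000,
          "example-snapshot-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
            public Collection<String> filesUnderSnapshot(final Path snapshotDir)
                throws IOException {
              return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
            }
          });
      return cache.getUnreferencedFiles(candidates, snapshotManager);
    }
  }
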
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
deleted file mode 100644
index 2fdbd55..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotHFileCleaner.java
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.snapshot;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
-import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
-import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * Implementation of a file cleaner that checks if a hfile is still used by snapshots of HBase
- * tables.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-@InterfaceStability.Evolving
-public class SnapshotHFileCleaner extends BaseHFileCleanerDelegate {
-  private static final Log LOG = LogFactory.getLog(SnapshotHFileCleaner.class);
-
-  /**
-   * Conf key for the frequency to attempt to refresh the cache of hfiles currently used in
-   * snapshots (ms)
-   */
-  public static final String HFILE_CACHE_REFRESH_PERIOD_CONF_KEY =
-      "hbase.master.hfilecleaner.plugins.snapshot.period";
-
-  /** Refresh cache, by default, every 5 minutes */
-  private static final long DEFAULT_HFILE_CACHE_REFRESH_PERIOD = 300000;
-
-  /** File cache for HFiles in the completed and currently running snapshots */
-  private SnapshotFileCache cache;
-
-  private MasterServices master;
-
-  @Override
-  public synchronized Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
-    try {
-      return cache.getUnreferencedFiles(files, master.getSnapshotManager());
-    } catch (CorruptedSnapshotException cse) {
-      LOG.debug("Corrupted in-progress snapshot file exception, ignored ", cse);
-    } catch (IOException e) {
-      LOG.error("Exception while checking if files were valid, keeping them just in case.", e);
-    }
-    return Collections.emptyList();
-  }
-
-  @Override
-  public void init(Map<String, Object> params) {
-    if (params.containsKey(HMaster.MASTER)) {
-      this.master = (MasterServices) params.get(HMaster.MASTER);
-    }
-  }
-
-  @Override
-  protected boolean isFileDeletable(FileStatus fStat) {
-    return false;
-  }
-
-  public void setConf(final Configuration conf) {
-    super.setConf(conf);
-    try {
-      long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY,
-        DEFAULT_HFILE_CACHE_REFRESH_PERIOD);
-      final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
-      Path rootDir = FSUtils.getRootDir(conf);
-      cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
-          "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
-            public Collection<String> filesUnderSnapshot(final Path snapshotDir)
-                throws IOException {
-              return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
-            }
-          });
-    } catch (IOException e) {
-      LOG.error("Failed to create cleaner util", e);
-    }
-  }
-
-
-  @Override
-  public void stop(String why) {
-    this.cache.stop(why);
-  }
-
-  @Override
-  public boolean isStopped() {
-    return this.cache.isStopped();
-  }
-
-  /**
-   * Exposed for Testing!
-   * @return the cache of all hfiles
-   */
-  public SnapshotFileCache getFileCacheForTesting() {
-    return this.cache;
-  }
-}

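The delegate above only runs when it is listed on the master's HFile cleaner chain. A minimal sketch of that configuration step follows, using the fs.legacy.cleaner HFileCleaner import shown later in this patch; the single TTL cleaner listed here is illustrative only, not a recommended production chain, and the class name of the sketch is made up.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;

  public class HFileCleanerPluginsSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // The HFileCleaner chore instantiates every listed delegate and removes an hfile only after
      // all of them report it as unreferenced.
      conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
          "org.apache.hadoop.hbase.fs.legacy.cleaner.TimeToLiveHFileCleaner");
      System.out.println(conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS));
    }
  }
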
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index a2c0ea0..75a1a17 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -20,13 +20,10 @@ package org.apache.hadoop.hbase.master.snapshot;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.ThreadPoolExecutor;
 
 import org.apache.commons.logging.Log;
@@ -54,8 +51,6 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.MetricsMaster;
 import org.apache.hadoop.hbase.master.SnapshotSentinel;
-import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
-import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
 import org.apache.hadoop.hbase.master.procedure.CloneSnapshotProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.RestoreSnapshotProcedure;
@@ -1054,15 +1049,6 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
     boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);
 
-    // Extract cleaners from conf
-    Set<String> hfileCleaners = new HashSet<String>();
-    String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
-    if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
-
-    Set<String> logCleaners = new HashSet<String>();
-    cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
-    if (cleaners != null) Collections.addAll(logCleaners, cleaners);
-
     // check if an older version of snapshot directory was present
     Path oldSnapshotDir = new Path(((LegacyPathIdentifier) ms.getRootContainer()).path, HConstants
         .OLD_SNAPSHOT_DIR_NAME);
@@ -1077,20 +1063,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     // otherwise we still need to check if cleaners are enabled or not and verify
     // that there're no snapshot in the .snapshot folder.
     if (snapshotEnabled) {
-      // Inject snapshot cleaners, if snapshot.enable is true
-      hfileCleaners.add(SnapshotHFileCleaner.class.getName());
-      hfileCleaners.add(HFileLinkCleaner.class.getName());
-
-      // Set cleaners conf
-      conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
-        hfileCleaners.toArray(new String[hfileCleaners.size()]));
-      conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
-        logCleaners.toArray(new String[logCleaners.size()]));
+      ms.enableSnapshots();
     } else {
       // Verify if cleaners are present
-      snapshotEnabled =
-        hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
-        hfileCleaners.contains(HFileLinkCleaner.class.getName());
+      snapshotEnabled = ms.isSnapshotsEnabled();
 
       // Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
       if (snapshotEnabled) {

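The two new calls above, ms.enableSnapshots() and ms.isSnapshotsEnabled(), absorb the cleaner-plugin bookkeeping that the deleted lines used to do inline. Below is one plausible shape for those methods, reconstructed purely from the removed configuration logic; the real MasterStorage implementation, its package, and the post-move cleaner class names are not shown in this hunk and are assumed here (the log-cleaner plumbing the removed code also carried is omitted).

  import java.util.Collection;
  import java.util.Collections;
  import java.util.HashSet;
  import java.util.Set;
  import org.apache.hadoop.conf.Configuration;

  /** Hypothetical sketch only; not the actual MasterStorage code. */
  public class SnapshotCleanerWiringSketch {
    private static final String HFILE_CLEANER_PLUGINS = "hbase.master.hfilecleaner.plugins";
    // Post-move class names are assumed for illustration.
    private static final String SNAPSHOT_HFILE_CLEANER =
        "org.apache.hadoop.hbase.fs.legacy.cleaner.SnapshotHFileCleaner";
    private static final String HFILE_LINK_CLEANER =
        "org.apache.hadoop.hbase.fs.legacy.cleaner.HFileLinkCleaner";

    private final Configuration conf;

    public SnapshotCleanerWiringSketch(Configuration conf) {
      this.conf = conf;
    }

    /** Mirrors the cleaner-injection logic removed from SnapshotManager above. */
    public void enableSnapshots() {
      Set<String> hfileCleaners = new HashSet<String>();
      String[] cleaners = conf.getStrings(HFILE_CLEANER_PLUGINS);
      if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
      hfileCleaners.add(SNAPSHOT_HFILE_CLEANER);
      hfileCleaners.add(HFILE_LINK_CLEANER);
      conf.setStrings(HFILE_CLEANER_PLUGINS,
          hfileCleaners.toArray(new String[hfileCleaners.size()]));
    }

    /** Mirrors the verification logic removed from SnapshotManager above. */
    public boolean isSnapshotsEnabled() {
      Collection<String> hfileCleaners = conf.getStringCollection(HFILE_CLEANER_PLUGINS);
      return hfileCleaners.contains(SNAPSHOT_HFILE_CLEANER)
          && hfileCleaners.contains(HFILE_LINK_CLEANER);
    }
  }
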
http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ServerProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ServerProtobufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ServerProtobufUtil.java
index 416ffee..cf2b630 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ServerProtobufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ServerProtobufUtil.java
@@ -17,134 +17,14 @@
  */
 package org.apache.hadoop.hbase.protobuf;
 
-// TODO remove unused imports
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.lang.reflect.ParameterizedType;
-import java.lang.reflect.Type;
-import java.nio.ByteBuffer;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.ClusterId;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseIOException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
+
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.CompactionState;
-import org.apache.hadoop.hbase.client.Consistency;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.RegionLoadStats;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.SnapshotDescription;
-import org.apache.hadoop.hbase.client.SnapshotType;
-import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
-import org.apache.hadoop.hbase.client.security.SecurityCapability;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.filter.ByteArrayComparable;
-import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
-import org.apache.hadoop.hbase.io.LimitInputStream;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
-import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileResponse;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
-import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
-import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo;
-import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition;
-import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
-import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
-import org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent;
-import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
-import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos;
-import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
-import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.util.ByteStringer;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
index a2a0dcc..2dcb1ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
@@ -28,7 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/53f4ec9e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
index 4c86244..5b94c4b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
@@ -28,10 +28,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
-import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.BaseHFileCleanerDelegate;
+import org.apache.hadoop.hbase.fs.legacy.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;