Posted to commits@hbase.apache.org by la...@apache.org on 2012/07/22 03:11:37 UTC
svn commit: r1364203 [2/3] - in /hbase/trunk:
hbase-common/src/main/java/org/apache/hadoop/hbase/
hbase-server/src/main/java/org/apache/hadoop/hbase/
hbase-server/src/main/java/org/apache/hadoop/hbase/backup/
hbase-server/src/main/java/org/apache/hadoo...
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,238 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Chore;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+/**
+ * Abstract Cleaner that uses a chain of delegates to clean a directory of files
+ * @param <T> Cleaner delegate class that is dynamically loaded from configuration
+ */
+public abstract class CleanerChore<T extends FileCleanerDelegate> extends Chore {
+
+ private static final Log LOG = LogFactory.getLog(CleanerChore.class.getName());
+
+ private final FileSystem fs;
+ private final Path oldFileDir;
+ private final Configuration conf;
+ private List<T> cleanersChain;
+
+ /**
+ * @param name name of the chore being run
+ * @param sleepPeriod the period of time to sleep between each run
+ * @param s the stopper
+ * @param conf configuration to use
+ * @param fs handle to the FS
+ * @param oldFileDir the path to the archived files
+ * @param confKey configuration key for the classes to instantiate
+ */
+ public CleanerChore(String name, final int sleepPeriod, final Stoppable s, Configuration conf,
+ FileSystem fs, Path oldFileDir, String confKey) {
+ super(name, sleepPeriod, s);
+ this.fs = fs;
+ this.oldFileDir = oldFileDir;
+ this.conf = conf;
+
+ initCleanerChain(confKey);
+ }
+
+ /**
+ * Validate the file to see if it even belongs in the directory. If it is valid, then the file
+ * will go through the cleaner delegates, but otherwise the file is just deleted.
+ * @param file full {@link Path} of the file to be checked
+ * @return <tt>true</tt> if the file is valid, <tt>false</tt> otherwise
+ */
+ protected abstract boolean validate(Path file);
+
+ /**
+ * Instantiate and initialize all the file cleaners set in the configuration
+ * @param confKey key to get the file cleaner classes from the configuration
+ */
+ private void initCleanerChain(String confKey) {
+ this.cleanersChain = new LinkedList<T>();
+ String[] logCleaners = conf.getStrings(confKey);
+ if (logCleaners != null) {
+ for (String className : logCleaners) {
+ T logCleaner = newFileCleaner(className, conf);
+ if (logCleaner != null) this.cleanersChain.add(logCleaner);
+ }
+ }
+ }
+
+ /**
+ * A utility method to create new instances of a file cleaner delegate based on the class name of
+ * the delegate.
+ * @param className fully qualified class name of the delegate
+ * @param conf configuration used to initialize the cleaner
+ * @return the new instance, or <tt>null</tt> if it could not be instantiated
+ */
+ public T newFileCleaner(String className, Configuration conf) {
+ try {
+ Class<? extends FileCleanerDelegate> c = Class.forName(className).asSubclass(
+ FileCleanerDelegate.class);
+ @SuppressWarnings("unchecked")
+ T cleaner = (T) c.newInstance();
+ cleaner.setConf(conf);
+ return cleaner;
+ } catch (Exception e) {
+ LOG.warn("Can NOT create CleanerDelegate: " + className, e);
+ // skipping if can't instantiate
+ return null;
+ }
+ }
+
+ @Override
+ protected void chore() {
+ try {
+ FileStatus[] files = FSUtils.listStatus(this.fs, this.oldFileDir, null);
+ // if the path (file or directory) doesn't exist, then we can just return
+ if (files == null) return;
+ // loop over the found files and see if they should be deleted
+ for (FileStatus file : files) {
+ try {
+ if (file.isDir()) checkDirectory(file.getPath());
+ else checkAndDelete(file.getPath());
+ } catch (IOException e) {
+ e = RemoteExceptionHandler.checkIOException(e);
+ LOG.warn("Error while cleaning the logs", e);
+ }
+ }
+ } catch (IOException e) {
+ LOG.warn("Failed to get status of:" + oldFileDir);
+ }
+
+ }
+
+ /**
+ * Check to see if we can delete a directory (and all the children files of that directory).
+ * <p>
+ * A directory is not deleted on the same pass in which its children are deleted, since that
+ * would require another set of lookups in the filesystem, which is semantically the same as
+ * waiting until the next time the chore is run, so we might as well wait.
+ * @param toCheck directory to check
+ * @throws IOException
+ */
+ private void checkDirectory(Path toCheck) throws IOException {
+ LOG.debug("Checking directory: " + toCheck);
+ FileStatus[] files = checkAndDeleteDirectory(toCheck);
+ // if the directory doesn't exist, then we are done
+ if (files == null) return;
+
+ // otherwise we need to check each of the child files
+ for (FileStatus file : files) {
+ Path filePath = file.getPath();
+ // if its a directory, then check to see if it should be deleted
+ if (file.isDir()) {
+ // check the subfiles to see if they can be deleted
+ checkDirectory(filePath);
+ continue;
+ }
+ // otherwise we can just check the file
+ checkAndDelete(filePath);
+ }
+
+ // recheck the directory to see if we can delete it this time
+ checkAndDeleteDirectory(toCheck);
+ }
+
+ /**
+ * Check and delete the passed directory if the directory is empty
+ * @param toCheck full path to the directory to check (and possibly delete)
+ * @return <tt>null</tt> if the directory was empty (and possibly deleted) and otherwise an array
+ * of <code>FileStatus</code> for the files in the directory
+ * @throws IOException
+ */
+ private FileStatus[] checkAndDeleteDirectory(Path toCheck) throws IOException {
+ LOG.debug("Attempting to delete directory:" + toCheck);
+ // if it doesn't exist, we are done
+ if (!fs.exists(toCheck)) return null;
+ // get the files below the directory
+ FileStatus[] files = FSUtils.listStatus(fs, toCheck, null);
+ // if there are no subfiles, then we can delete the directory
+ if (files == null) {
+ checkAndDelete(toCheck);
+ return null;
+ }
+
+ // return the status of the files in the directory
+ return files;
+ }
+
+ /**
+ * Run the given file through each of the cleaners to see if it should be deleted, deleting it if
+ * necessary.
+ * @param filePath path of the file to check (and possibly delete)
+ * @throws IOException if we can't delete a file because of a filesystem issue
+ * @throws IllegalArgumentException if the file is a directory and has children
+ */
+ private void checkAndDelete(Path filePath) throws IOException, IllegalArgumentException {
+ if (!validate(filePath)) {
+ LOG.warn("Found a wrongly formatted file: " + filePath.getName() + "deleting it.");
+ if (!this.fs.delete(filePath, true)) {
+ LOG.warn("Attempted to delete:" + filePath
+ + ", but couldn't. Run cleaner chain and attempt to delete on next pass.");
+ }
+ return;
+ }
+ for (T cleaner : cleanersChain) {
+ if (cleaner.isStopped()) {
+ LOG.warn("A file cleaner" + this.getName() + " is stopped, won't delete any file in:"
+ + this.oldFileDir);
+ return;
+ }
+
+ if (!cleaner.isFileDeleteable(filePath)) {
+ // this file is not deletable, so we are done
+ LOG.debug(filePath + " is not deletable according to:" + cleaner);
+ return;
+ }
+ }
+ // delete this file if it passes all the cleaners
+ LOG.debug("Removing:" + filePath + " from archive");
+ if (this.fs.delete(filePath, false)) {
+ LOG.warn("Attempted to delete:" + filePath
+ + ", but couldn't. Run cleaner chain and attempt to delete on next pass.");
+ }
+ }
+
+
+ @Override
+ public void cleanup() {
+ for (T lc : this.cleanersChain) {
+ try {
+ lc.stop("Exiting");
+ } catch (Throwable t) {
+ LOG.warn("Stopping", t);
+ }
+ }
+ }
+}
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/FileCleanerDelegate.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Stoppable;
+
+/**
+ * General interface for cleaning files from a folder (generally an archive or
+ * backup folder). These are chained via the {@link CleanerChore} to determine
+ * if a given file should be deleted.
+ */
+@InterfaceAudience.Private
+public interface FileCleanerDelegate extends Configurable, Stoppable {
+
+ /**
+ * Should the master delete the file or keep it?
+ * @param file full path to the file to check
+ * @return <tt>true</tt> if the file is deletable, <tt>false</tt> if not
+ */
+ public boolean isFileDeleteable(Path file);
+
+}
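For illustration, a minimal delegate could look like the sketch below. The class name KeepMarkerFileCleaner and its ".keep" rule are hypothetical and not part of this commit; real plugins would typically extend BaseLogCleanerDelegate or BaseHFileCleanerDelegate rather than implement the interface directly. Note that CleanerChore instantiates delegates reflectively, so a public no-argument constructor is required.

    // Hypothetical delegate: keep any file whose name ends in ".keep", allow the rest to be deleted.
    // Shown only to illustrate the FileCleanerDelegate contract; not part of this commit.
    package org.example.cleaner;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.master.cleaner.FileCleanerDelegate;

    public class KeepMarkerFileCleaner implements FileCleanerDelegate {
      private Configuration conf;
      private volatile boolean stopped = false;

      @Override
      public void setConf(Configuration conf) {
        this.conf = conf;
      }

      @Override
      public Configuration getConf() {
        return this.conf;
      }

      @Override
      public boolean isFileDeleteable(Path file) {
        // files explicitly marked with ".keep" are never deletable
        return !file.getName().endsWith(".keep");
      }

      @Override
      public void stop(String why) {
        this.stopped = true;
      }

      @Override
      public boolean isStopped() {
        return this.stopped;
      }
    }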
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+/**
+ * This Chore, every time it runs, will clear the HFiles in the hfile archive
+ * folder that are deletable for each HFile cleaner in the chain.
+ */
+@InterfaceAudience.Private
+public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
+
+ public static final String MASTER_HFILE_CLEANER_PLUGINS = "hbase.master.hfilecleaner.plugins";
+
+ /**
+ * @param period the period of time to sleep between each run
+ * @param stopper the stopper
+ * @param conf configuration to use
+ * @param fs handle to the FS
+ * @param directory directory to be cleaned
+ */
+ public HFileCleaner(final int period, final Stoppable stopper, Configuration conf, FileSystem fs,
+ Path directory) {
+ super("HFileCleaner", period, stopper, conf, fs, directory, MASTER_HFILE_CLEANER_PLUGINS);
+ }
+
+ @Override
+ protected boolean validate(Path file) {
+ return StoreFile.validateStoreFileName(file.getName());
+ }
+}
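As a rough usage sketch (not code from this commit), the chore could be wired up as below. The 60-second period, the trivial Stoppable, and the assumption that the chore is started via start() like other HBase chores are all illustrative; in practice the master constructs the cleaner and starts it alongside its other chores.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.Stoppable;
    import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
    import org.apache.hadoop.hbase.util.HFileArchiveUtil;

    public class HFileCleanerWiring {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        // trivial stopper for illustration; the master would pass itself
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { this.stopped = true; }
          @Override public boolean isStopped() { return this.stopped; }
        };
        Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
        // scan the archive for deletable hfiles once a minute
        HFileCleaner cleaner = new HFileCleaner(60 * 1000, stopper, conf, fs, archiveDir);
        cleaner.start(); // assumption: started like other HBase chores
      }
    }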
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+
+/**
+ * This Chore, every time it runs, will attempt to delete the HLogs in the old logs folder. The HLog
+ * is only deleted if none of the cleaner delegates says otherwise.
+ * @see BaseLogCleanerDelegate
+ */
+@InterfaceAudience.Private
+public class LogCleaner extends CleanerChore<BaseLogCleanerDelegate> {
+ static final Log LOG = LogFactory.getLog(LogCleaner.class.getName());
+
+ /**
+ * @param p the period of time to sleep between each run
+ * @param s the stopper
+ * @param conf configuration to use
+ * @param fs handle to the FS
+ * @param oldLogDir the path to the archived logs
+ */
+ public LogCleaner(final int p, final Stoppable s, Configuration conf, FileSystem fs,
+ Path oldLogDir) {
+ super("LogsCleaner", p, s, conf, fs, oldLogDir, HBASE_MASTER_LOGCLEANER_PLUGINS);
+ }
+
+ @Override
+ protected boolean validate(Path file) {
+ return HLog.validateHLogFilename(file.getName());
+ }
+}
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveHFileCleaner.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * HFile cleaner that uses the timestamp of the hfile to determine if it should be deleted. By
+ * default they are allowed to live for {@value TimeToLiveHFileCleaner#DEFAULT_TTL} ms (5 minutes).
+ */
+@InterfaceAudience.Private
+public class TimeToLiveHFileCleaner extends BaseHFileCleanerDelegate {
+
+ public static final Log LOG = LogFactory.getLog(TimeToLiveHFileCleaner.class.getName());
+ public static final String TTL_CONF_KEY = "hbase.master.hfilecleaner.ttl";
+ // default ttl = 5 minutes
+ private static final long DEFAULT_TTL = 60000 * 5;
+ // Configured time an hfile can be kept after it was moved to the archive
+ private long ttl;
+ private FileSystem fs;
+
+ @Override
+ public void setConf(Configuration conf) {
+ this.ttl = conf.getLong(TTL_CONF_KEY, DEFAULT_TTL);
+ super.setConf(conf);
+ }
+
+ @Override
+ public boolean isFileDeleteable(Path filePath) {
+ if (!instantiateFS()) {
+ return false;
+ }
+ long time = 0;
+ long currentTime = EnvironmentEdgeManager.currentTimeMillis();
+ try {
+ FileStatus fStat = fs.getFileStatus(filePath);
+ time = fStat.getModificationTime();
+ } catch (IOException e) {
+ LOG.error("Unable to get modification time of file " + filePath.getName()
+ + ", not deleting it.", e);
+ return false;
+ }
+ long life = currentTime - time;
+ LOG.debug("Life:" + life + ", tt:" + ttl + ", current:" + currentTime + ", from: " + time);
+ if (life < 0) {
+ LOG.warn("Found a log (" + filePath + ") newer than current time (" + currentTime + " < "
+ + time + "), probably a clock skew");
+ return false;
+ }
+ return life > ttl;
+ }
+
+ /**
+ * Set up the filesystem, if it hasn't been already.
+ * @return <tt>true</tt> if the filesystem is available, <tt>false</tt> if it could not be set up
+ */
+ private synchronized boolean instantiateFS() {
+ if (this.fs == null) {
+ try {
+ this.fs = FileSystem.get(this.getConf());
+ } catch (IOException e) {
+ LOG.error("Couldn't instantiate the file system, not deleting file, just incase");
+ return false;
+ }
+ }
+ return true;
+ }
+}
\ No newline at end of file
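The retention window is driven entirely by hbase.master.hfilecleaner.ttl (default 300000 ms, i.e. 5 minutes): a file becomes deletable once current time minus its modification time exceeds the TTL. A short sketch of overriding it programmatically follows; the 30-minute value is only an example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;

    public class HFileTtlConfigExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // keep archived hfiles around for 30 minutes instead of the default 5
        conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, 30 * 60 * 1000L);
      }
    }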
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * Log cleaner that uses the timestamp of the hlog to determine if it should
+ * be deleted. By default they are allowed to live for 10 minutes.
+ */
+@InterfaceAudience.Private
+public class TimeToLiveLogCleaner extends BaseLogCleanerDelegate {
+ static final Log LOG = LogFactory.getLog(TimeToLiveLogCleaner.class.getName());
+ // Configured time a log can be kept after it was closed
+ private long ttl;
+ private boolean stopped = false;
+
+ @Override
+ public boolean isLogDeletable(Path filePath) {
+ long time = 0;
+ long currentTime = System.currentTimeMillis();
+ try {
+ FileStatus fStat = filePath.getFileSystem(this.getConf()).getFileStatus(filePath);
+ time = fStat.getModificationTime();
+ } catch (IOException e) {
+ LOG.error("Unable to get modification time of file " + filePath.getName() +
+ ", not deleting it.", e);
+ return false;
+ }
+ long life = currentTime - time;
+ if (life < 0) {
+ LOG.warn("Found a log newer than current time, " +
+ "probably a clock skew");
+ return false;
+ }
+ return life > ttl;
+ }
+
+ @Override
+ public void setConf(Configuration conf) {
+ super.setConf(conf);
+ this.ttl = conf.getLong("hbase.master.logcleaner.ttl", 600000);
+ }
+
+
+ @Override
+ public void stop(String why) {
+ this.stopped = true;
+ }
+
+ @Override
+ public boolean isStopped() {
+ return this.stopped;
+ }
+}
\ No newline at end of file
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sun Jul 22 01:11:36 2012
@@ -84,6 +84,7 @@ import org.apache.hadoop.hbase.HTableDes
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.UnknownScannerException;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
@@ -938,16 +939,7 @@ public class HRegion implements HeapSize
writestate.writesEnabled = false;
wasFlushing = writestate.flushing;
LOG.debug("Closing " + this + ": disabling compactions & flushes");
- while (writestate.compacting > 0 || writestate.flushing) {
- LOG.debug("waiting for " + writestate.compacting + " compactions" +
- (writestate.flushing ? " & cache flush" : "") +
- " to complete for region " + this);
- try {
- writestate.wait();
- } catch (InterruptedException iex) {
- // continue
- }
- }
+ waitForFlushesAndCompactions();
}
// If we were not just flushing, is it worth doing a preflush...one
// that will clear out of the bulk of the memstore before we put up
@@ -1022,6 +1014,26 @@ public class HRegion implements HeapSize
}
}
+ /**
+ * Wait for all current flushes and compactions of the region to complete.
+ * <p>
+ * Exposed for TESTING.
+ */
+ public void waitForFlushesAndCompactions() {
+ synchronized (writestate) {
+ while (writestate.compacting > 0 || writestate.flushing) {
+ LOG.debug("waiting for " + writestate.compacting + " compactions"
+ + (writestate.flushing ? " & cache flush" : "") + " to complete for region " + this);
+ try {
+ writestate.wait();
+ } catch (InterruptedException iex) {
+ // essentially ignore and propagate the interrupt back up
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+ }
+
protected ThreadPoolExecutor getStoreOpenAndCloseThreadPool(
final String threadNamePrefix) {
int numStores = Math.max(1, this.htableDescriptor.getFamilies().size());
@@ -4170,8 +4182,13 @@ public class HRegion implements HeapSize
LOG.debug("Files for new region");
listPaths(fs, dstRegion.getRegionDir());
}
- deleteRegion(fs, a.getRegionDir());
- deleteRegion(fs, b.getRegionDir());
+
+ // delete out the 'A' region
+ HFileArchiver.archiveRegion(fs, FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(),
+ a.getRegionDir());
+ // delete out the 'B' region
+ HFileArchiver.archiveRegion(fs, FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(),
+ b.getRegionDir());
LOG.info("merge completed. New region is " + dstRegion);
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Sun Jul 22 01:11:36 2012
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.HRegionIn
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@@ -488,7 +489,7 @@ public class Store extends SchemaConfigu
/**
* @return All store files.
*/
- List<StoreFile> getStorefiles() {
+ public List<StoreFile> getStorefiles() {
return this.storefiles;
}
@@ -1609,10 +1610,12 @@ public class Store extends SchemaConfigu
// Tell observers that list of StoreFiles has changed.
notifyChangedReadersObservers();
- // Finally, delete old store files.
- for (StoreFile hsf: compactedFiles) {
- hsf.deleteReader();
- }
+
+ // let the archive util decide if we should archive or delete the files
+ LOG.debug("Removing store files after compaction...");
+ HFileArchiver.archiveStoreFiles(this.fs, this.region, this.conf, this.family.getName(),
+ compactedFiles);
+
} catch (IOException e) {
e = RemoteExceptionHandler.checkIOException(e);
LOG.error("Failed replacing compacted files in " + this +
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Sun Jul 22 01:11:36 2012
@@ -863,13 +863,20 @@ public class StoreFile extends SchemaCon
}
/**
- * Write out a split reference.
- *
- * Package local so it doesnt leak out of regionserver.
- *
+ * Validate the store file name.
+ * @param fileName name of the file to validate
+ * @return <tt>true</tt> if the file could be a valid store file, <tt>false</tt> otherwise
+ */
+ public static boolean validateStoreFileName(String fileName) {
+ return !fileName.contains("-");
+ }
+
+ /**
+ * Write out a split reference. Package local so it doesnt leak out of
+ * regionserver.
* @param fs
* @param splitDir Presumes path format is actually
- * <code>SOME_DIRECTORY/REGIONNAME/FAMILY</code>.
+ * <code>SOME_DIRECTORY/REGIONNAME/FAMILY</code>.
* @param f File to split.
* @param splitRow
* @param top True if we are referring to the top half of the hfile.
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Sun Jul 22 01:11:36 2012
@@ -1766,6 +1766,11 @@ public class HLog implements Syncable {
return dir;
}
+ /**
+ * @param filename name of the file to validate
+ * @return <tt>true</tt> if the filename matches an HLog, <tt>false</tt>
+ * otherwise
+ */
public static boolean validateHLogFilename(String filename) {
return pattern.matcher(filename).matches();
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationLogCleaner.java Sun Jul 22 01:11:36 2012
@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.master.LogCleanerDelegate;
+import org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate;
import org.apache.hadoop.hbase.replication.ReplicationZookeeper;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
@@ -42,18 +42,13 @@ import java.util.Set;
* replication before deleting it when its TTL is over.
*/
@InterfaceAudience.Private
-public class ReplicationLogCleaner implements LogCleanerDelegate, Abortable {
+public class ReplicationLogCleaner extends BaseLogCleanerDelegate implements Abortable {
private static final Log LOG = LogFactory.getLog(ReplicationLogCleaner.class);
- private Configuration conf;
private ReplicationZookeeper zkHelper;
private Set<String> hlogs = new HashSet<String>();
private boolean stopped = false;
private boolean aborted;
- /**
- * Instantiates the cleaner, does nothing more.
- */
- public ReplicationLogCleaner() {}
@Override
public boolean isLogDeletable(Path filePath) {
@@ -69,7 +64,7 @@ public class ReplicationLogCleaner imple
// all members of this class are null if replication is disabled, and we
// return true since false would render the LogsCleaner useless
- if (this.conf == null) {
+ if (this.getConf() == null) {
return true;
}
String log = filePath.getName();
@@ -124,18 +119,18 @@ public class ReplicationLogCleaner imple
}
@Override
- public void setConf(Configuration conf) {
+ public void setConf(Configuration config) {
// If replication is disabled, keep all members null
- if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY, false)) {
+ if (!config.getBoolean(HConstants.REPLICATION_ENABLE_KEY, false)) {
return;
}
// Make my own Configuration. Then I'll have my own connection to zk that
// I can close myself when comes time.
- this.conf = new Configuration(conf);
+ Configuration conf = new Configuration(config);
+ super.setConf(conf);
try {
- ZooKeeperWatcher zkw =
- new ZooKeeperWatcher(this.conf, "replicationLogCleaner", null);
- this.zkHelper = new ReplicationZookeeper(this, this.conf, zkw);
+ ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "replicationLogCleaner", null);
+ this.zkHelper = new ReplicationZookeeper(this, conf, zkw);
} catch (KeeperException e) {
LOG.error("Error while configuring " + this.getClass().getName(), e);
} catch (IOException e) {
@@ -144,10 +139,6 @@ public class ReplicationLogCleaner imple
refreshHLogsAndSearch(null);
}
- @Override
- public Configuration getConf() {
- return conf;
- }
@Override
public void stop(String why) {
@@ -158,7 +149,7 @@ public class ReplicationLogCleaner imple
this.zkHelper.getZookeeperWatcher().close();
}
// Not sure why we're deleting a connection that we never acquired or used
- HConnectionManager.deleteConnection(this.conf, true);
+ HConnectionManager.deleteConnection(this.getConf(), true);
}
@Override
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Sun Jul 22 01:11:36 2012
@@ -656,6 +656,10 @@ public abstract class FSUtils {
return p.makeQualified(fs);
}
+ public static void setRootDir(final Configuration c, final Path root) throws IOException {
+ c.set(HConstants.HBASE_DIR, root.toString());
+ }
+
/**
* Checks if root region exists
*
@@ -1138,4 +1142,36 @@ public abstract class FSUtils {
public static boolean isExists(final FileSystem fs, final Path path) throws IOException {
return fs.exists(path);
}
+
+ /**
+ * Log the current state of the filesystem from a certain root directory
+ * @param fs filesystem to investigate
+ * @param root root file/directory to start logging from
+ * @param LOG log to output information
+ * @throws IOException if an unexpected exception occurs
+ */
+ public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
+ throws IOException {
+ LOG.debug("Current file system:");
+ logFSTree(LOG, fs, root, "|-");
+ }
+
+ /**
+ * Recursive helper to log the state of the FS
+ * @see #logFileSystemState(FileSystem, Path, Log)
+ */
+ private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
+ throws IOException {
+ FileStatus[] files = FSUtils.listStatus(fs, root, null);
+ if (files == null) return;
+
+ for (FileStatus file : files) {
+ if (file.isDir()) {
+ LOG.debug(prefix + file.getPath().getName() + "/");
+ logFSTree(LOG, fs, file.getPath(), prefix + "---");
+ } else {
+ LOG.debug(prefix + file.getPath().getName());
+ }
+ }
+ }
}
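A small usage sketch of the new logging helper (the class name FsStateDumpExample is illustrative): it walks the tree under the given root and logs each entry at DEBUG with a prefix that reflects nesting depth.

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class FsStateDumpExample {
      private static final Log LOG = LogFactory.getLog(FsStateDumpExample.class);

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path root = FSUtils.getRootDir(conf);
        // dumps every file and directory below the HBase root dir at DEBUG level
        FSUtils.logFileSystemState(fs, root, LOG);
      }
    }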
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Store;
+
+/**
+ * Helper class for all utilities related to archival/retrieval of HFiles
+ */
+public class HFileArchiveUtil {
+ static final String DEFAULT_HFILE_ARCHIVE_DIRECTORY = ".archive";
+
+ private HFileArchiveUtil() {
+ // non-external instantiation - util class
+ }
+
+ /**
+ * Get the directory to archive a store directory
+ * @param conf {@link Configuration} to read for the archive directory name
+ * @param region parent region information under which the store currently
+ * lives
+ * @param family name of the family in the store
+ * @return {@link Path} to the directory to archive the given store or
+ * <tt>null</tt> if it should not be archived
+ */
+ public static Path getStoreArchivePath(Configuration conf, HRegion region, byte [] family){
+ return getStoreArchivePath(conf, region.getRegionInfo(), region.getTableDir(), family);
+ }
+
+ /**
+ * Get the directory to archive a store directory
+ * @param conf {@link Configuration} to read for the archive directory name. Can be null.
+ * @param region parent region information under which the store currently lives
+ * @param tabledir directory for the table under which the store currently lives
+ * @param family name of the family in the store
+ * @return {@link Path} to the directory to archive the given store or <tt>null</tt> if it should
+ * not be archived
+ */
+ public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir,
+ byte[] family) {
+ Path tableArchiveDir = getTableArchivePath(conf, tabledir);
+ return Store.getStoreHomedir(tableArchiveDir,
+ HRegionInfo.encodeRegionName(region.getRegionName()), family);
+ }
+
+ /**
+ * Get the archive directory for a given region under the specified table
+ * @param conf {@link Configuration} to read the archive directory from. Can be null
+ * @param tabledir the original table directory. Cannot be null.
+ * @param regiondir the path to the region directory. Cannot be null.
+ * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
+ * should not be archived
+ */
+ public static Path getRegionArchiveDir(Configuration conf, Path tabledir, Path regiondir) {
+ // get the archive directory for a table
+ Path archiveDir = getTableArchivePath(conf, tabledir);
+
+ // then add on the region path under the archive
+ String encodedRegionName = regiondir.getName();
+ return HRegion.getRegionDir(archiveDir, encodedRegionName);
+ }
+
+ /**
+ * Get the path to the table archive directory based on the configured archive directory.
+ * <p>
+ * Assumed that the table should already be archived.
+ * @param conf {@link Configuration} to read the archive directory property. Can be null
+ * @param tabledir directory of the table to be archived. Cannot be null.
+ * @return {@link Path} to the archive directory for the table
+ */
+ public static Path getTableArchivePath(Configuration conf, Path tabledir) {
+ String archiveName = getConfiguredArchiveDirName(conf);
+ Path root = tabledir.getParent();
+ // now build the archive directory path
+ // first the top-level archive directory
+ // generally "/hbase/.archive/[table]
+ return archiveName.length() == 0 ? new Path(root, tabledir) : new Path(new Path(root,
+ archiveName), tabledir.getName());
+ }
+
+ /**
+ * Get the archive directory as per the configuration
+ * @param conf {@link Configuration} to read the archive directory from (can be null, in which
+ * case you get the default value). Can be null.
+ * @return the configured archived directory or the default specified by
+ * {@value HFileArchiveUtil#DEFAULT_HFILE_ARCHIVE_DIRECTORY}
+ */
+ public static String getConfiguredArchiveDirName(Configuration conf) {
+ return conf == null ? HFileArchiveUtil.DEFAULT_HFILE_ARCHIVE_DIRECTORY : conf.get(
+ HConstants.HFILE_ARCHIVE_DIRECTORY, HFileArchiveUtil.DEFAULT_HFILE_ARCHIVE_DIRECTORY);
+ }
+
+ /**
+ * Get the full path to the archive directory on the configured {@link FileSystem}
+ * @param conf to look for archive directory name and root directory. Cannot be null. Notes for
+ * testing: requires a FileSystem root directory to be specified.
+ * @return the full {@link Path} to the archive directory, as defined by the configuration
+ * @throws IOException if an unexpected error occurs
+ */
+ public static Path getArchivePath(Configuration conf) throws IOException {
+ return new Path(FSUtils.getRootDir(conf), getConfiguredArchiveDirName(conf));
+ }
+}
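To make the path layout concrete, the sketch below shows where a table's and a region's archived files land under the default ".archive" directory name; the table name and encoded region name are made up for the example.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.HFileArchiveUtil;

    public class ArchivePathExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        Path tableDir = new Path("/hbase/test_table");
        Path regionDir = new Path(tableDir, "0123456789abcdef0123456789abcdef");
        // -> /hbase/.archive/test_table (with the default archive directory name)
        Path tableArchive = HFileArchiveUtil.getTableArchivePath(conf, tableDir);
        // -> /hbase/.archive/test_table/0123456789abcdef0123456789abcdef
        Path regionArchive = HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir);
        System.out.println(tableArchive + "\n" + regionArchive);
      }
    }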
Modified: hbase/trunk/hbase-server/src/main/resources/hbase-default.xml
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/resources/hbase-default.xml?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/resources/hbase-default.xml (original)
+++ hbase/trunk/hbase-server/src/main/resources/hbase-default.xml Sun Jul 22 01:11:36 2012
@@ -297,7 +297,7 @@
</property>
<property>
<name>hbase.master.logcleaner.plugins</name>
- <value>org.apache.hadoop.hbase.master.TimeToLiveLogCleaner</value>
+ <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner</value>
<description>A comma-separated list of LogCleanerDelegate invoked by
the LogsCleaner service. These WAL/HLog cleaners are called in order,
so put the HLog cleaner that prunes the most HLog files in front. To
@@ -859,7 +859,6 @@
files when hbase.data.umask.enable is true
</description>
</property>
-
<property>
<name>hbase.metrics.showTableName</name>
<value>true</value>
@@ -869,7 +868,6 @@
In both cases, the aggregated metric M across tables and cfs will be reported.
</description>
</property>
-
<property>
<name>hbase.metrics.exposeOperationTimes</name>
<value>true</value>
@@ -878,5 +876,23 @@
have their times exposed through Hadoop metrics per CF and per region.
</description>
</property>
-
+ <property>
+ <name>hbase.table.archive.directory</name>
+ <value>.archive</value>
+ <description>Per-table directory name under which to back up files for a
+ table. Files are moved to the same directories as they would be under the
+ table directory, but instead are just one level lower (under
+ table/.archive/... rather than table/...). Currently only applies to HFiles.</description>
+ </property>
+ <property>
+ <name>hbase.master.hfilecleaner.plugins</name>
+ <value>org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner</value>
+ <description>A comma-separated list of HFileCleanerDelegate invoked by
+ the HFileCleaner service. These HFile cleaners are called in order,
+ so put the cleaner that prunes the most files in front. To
+ implement your own HFileCleanerDelegate, just put it in HBase's classpath
+ and add the fully qualified class name here. Always add the above
+ default hfile cleaners in the list as they will be overwritten in hbase-site.xml.
+ </description>
+ </property>
</configuration>
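For example, a deployment adding its own cleaner would keep the default TTL cleaner in the list. In the sketch below, org.example.cleaner.KeepMarkerFileCleaner refers to the hypothetical delegate sketched earlier, not a class shipped with HBase; the same value can of course be set in hbase-site.xml instead of programmatically.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HFileCleanerPluginConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // keep the default TTL cleaner and add a custom one behind it
        conf.setStrings("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner",
            "org.example.cleaner.KeepMarkerFileCleaner");
      }
    }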
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Sun Jul 22 01:11:36 2012
@@ -950,7 +950,11 @@ public class HBaseTestingUtility {
* @param tableName existing table
*/
public void deleteTable(byte[] tableName) throws IOException {
- getHBaseAdmin().disableTable(tableName);
+ try {
+ getHBaseAdmin().disableTable(tableName);
+ } catch (TableNotEnabledException e) {
+ LOG.debug("Table: " + Bytes.toString(tableName) + " already disabled, so just deleting it.");
+ }
getHBaseAdmin().deleteTable(tableName);
}
Added: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java (added)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,293 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that the {@link HFileArchiver} correctly removes all the parts of a region when cleaning up
+ * a region
+ */
+@Category(MediumTests.class)
+public class TestHFileArchiving {
+
+ private static final String STRING_TABLE_NAME = "test_table";
+
+ private static final Log LOG = LogFactory.getLog(TestHFileArchiving.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
+ private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+
+ /**
+ * Setup the config for the cluster
+ */
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster();
+ }
+
+ private static void setupConf(Configuration conf) {
+ // disable the ui
+ conf.setInt("hbase.regionsever.info.port", -1);
+ // drop the memstore size so we get flushes
+ conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+ // disable major compactions
+ conf.setInt(HConstants.MAJOR_COMPACTION_PERIOD, 0);
+ }
+
+ @Before
+ public void setup() throws Exception {
+ UTIL.createTable(TABLE_NAME, TEST_FAM);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ // cleanup the cluster if its up still
+ if (UTIL.getHBaseAdmin().tableExists(STRING_TABLE_NAME)) {
+ UTIL.deleteTable(TABLE_NAME);
+ }
+ // and cleanup the archive directory
+ try {
+ clearArchiveDirectory();
+ } catch (IOException e) {
+ Assert.fail("Failure to delete archive directory:" + e.getMessage());
+ }
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ // NOOP;
+ }
+ }
+
+ @Test
+ public void testRemovesRegionDirOnArchive() throws Exception {
+ final HBaseAdmin admin = UTIL.getHBaseAdmin();
+
+ // get the current store files for the region
+ List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ // make sure we only have 1 region serving this table
+ assertEquals(1, servingRegions.size());
+ HRegion region = servingRegions.get(0);
+
+ // and load the table
+ UTIL.loadRegion(region, TEST_FAM);
+
+ // shutdown the table so we can manipulate the files
+ admin.disableTable(STRING_TABLE_NAME);
+
+ FileSystem fs = UTIL.getTestFileSystem();
+
+ // now attempt to depose the region
+ Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());
+
+ HFileArchiver.archiveRegion(fs, region.getRegionInfo());
+
+ // check for the existence of the archive directory and some files in it
+ Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
+ assertTrue(fs.exists(archiveDir));
+
+ // check to make sure the store directory was copied
+ FileStatus[] stores = fs.listStatus(archiveDir);
+ assertTrue(stores.length == 1);
+
+ // make sure we archived the store files
+ FileStatus[] storeFiles = fs.listStatus(stores[0].getPath());
+ assertTrue(storeFiles.length > 0);
+
+ // then ensure the region's directory isn't present
+ assertFalse(fs.exists(regionDir));
+ }
+
+ /**
+ * Test that the region directory is removed when we archive a region without store files, but
+ * still has hidden files.
+ * @throws Exception
+ */
+ @Test
+ public void testDeleteRegionWithNoStoreFiles() throws Exception {
+ // get the current store files for the region
+ List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ // make sure we only have 1 region serving this table
+ assertEquals(1, servingRegions.size());
+ HRegion region = servingRegions.get(0);
+
+ FileSystem fs = region.getFilesystem();
+
+ // make sure there are some files in the regiondir
+ Path rootDir = FSUtils.getRootDir(fs.getConf());
+ Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+ FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
+ Assert.assertNotNull("No files in the region directory", regionFiles);
+ if (LOG.isDebugEnabled()) {
+ List<Path> files = new ArrayList<Path>();
+ for (FileStatus file : regionFiles) {
+ files.add(file.getPath());
+ }
+ LOG.debug("Current files:" + files);
+ }
+ // delete the visible folders so we just have hidden files/folders
+ final PathFilter dirFilter = new FSUtils.DirFilter(fs);
+ PathFilter nonHidden = new PathFilter() {
+ @Override
+ public boolean accept(Path file) {
+ return dirFilter.accept(file) && !file.getName().toString().startsWith(".");
+ }
+ };
+ FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
+ for (FileStatus store : storeDirs) {
+ LOG.debug("Deleting store for test");
+ fs.delete(store.getPath(), true);
+ }
+
+ // then archive the region
+ HFileArchiver.archiveRegion(fs, region.getRegionInfo());
+
+ // and check to make sure the region directory got deleted
+ assertFalse("Region directory (" + regionDir + "), still exists.", fs.exists(regionDir));
+ }
+
+ @Test
+ public void testArchiveOnTableDelete() throws Exception {
+ List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ // make sure we only have 1 region serving this table
+ assertEquals(1, servingRegions.size());
+ HRegion region = servingRegions.get(0);
+
+ // get the parent RS and monitor
+ HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
+ FileSystem fs = hrs.getFileSystem();
+
+ // put some data on the region
+ LOG.debug("-------Loading table");
+ UTIL.loadRegion(region, TEST_FAM);
+
+ // get the hfiles in the region
+ List<HRegion> regions = hrs.getOnlineRegions(TABLE_NAME);
+ assertEquals("More that 1 region for test table.", 1, regions.size());
+
+ region = regions.get(0);
+ // wait for all the compactions to complete
+ region.waitForFlushesAndCompactions();
+
+ // disable table to prevent new updates
+ UTIL.getHBaseAdmin().disableTable(TABLE_NAME);
+ LOG.debug("Disabled table");
+
+ // remove all the files from the archive to get a fair comparison
+ clearArchiveDirectory();
+
+ // then get the current store files
+ Path regionDir = region.getRegionDir();
+ List<String> storeFiles = getAllFileNames(fs, regionDir);
+ // remove all the non-storefile named files for the region
+ for (int i = 0; i < storeFiles.size(); i++) {
+ String file = storeFiles.get(i);
+ if (file.contains(HRegion.REGIONINFO_FILE) || file.contains("hlog")) {
+ storeFiles.remove(i--);
+ }
+ }
+ storeFiles.remove(HRegion.REGIONINFO_FILE);
+
+ // then delete the table so the hfiles get archived
+ UTIL.deleteTable(TABLE_NAME);
+
+ // then get the files in the archive directory.
+ Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration());
+ List<String> archivedFiles = getAllFileNames(fs, archiveDir);
+ Collections.sort(storeFiles);
+ Collections.sort(archivedFiles);
+
+ LOG.debug("Store files:");
+ for (int i = 0; i < storeFiles.size(); i++) {
+ LOG.debug(i + " - " + storeFiles.get(i));
+ }
+ LOG.debug("Archive files:");
+ for (int i = 0; i < archivedFiles.size(); i++) {
+ LOG.debug(i + " - " + archivedFiles.get(i));
+ }
+
+ assertTrue("Archived files are missing some of the store files!",
+ archivedFiles.containsAll(storeFiles));
+ }
+
+ private void clearArchiveDirectory() throws IOException {
+ UTIL.getTestFileSystem().delete(new Path(UTIL.getDefaultRootDirPath(), ".archive"), true);
+ }
+
+ /**
+ * Get the names of all the files below the given directory
+ * @param fs
+ * @param archiveDir
+ * @return
+ * @throws IOException
+ */
+ private List<String> getAllFileNames(final FileSystem fs, Path archiveDir) throws IOException {
+ FileStatus[] files = FSUtils.listStatus(fs, archiveDir, null);
+ return recurseOnFiles(fs, files, new ArrayList<String>());
+ }
+
+ /** Recursively look up all the file names under the given file[] array **/
+ private List<String> recurseOnFiles(FileSystem fs, FileStatus[] files, List<String> fileNames)
+ throws IOException {
+ if (files == null || files.length == 0) return fileNames;
+
+ for (FileStatus file : files) {
+ if (file.isDir()) {
+ recurseOnFiles(fs, FSUtils.listStatus(fs, file.getPath(), null), fileNames);
+ } else fileNames.add(file.getPath().getName());
+ }
+ return fileNames;
+ }
+}
\ No newline at end of file
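
The test above verifies archiving by collecting the names of every file under the region directory and under the archive root, sorting both lists, and checking that the archive contains each store file. Below is a minimal, self-contained sketch of that comparison using only the Hadoop FileSystem API; the class and method names (ArchiveComparisonSketch, listFileNames, archiveCoversStore) are illustrative and are not part of this commit.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveComparisonSketch {

  /** Recursively gather the simple names of all files under the given directory. */
  static List<String> listFileNames(FileSystem fs, Path dir) throws IOException {
    List<String> names = new ArrayList<String>();
    FileStatus[] children = fs.listStatus(dir);
    if (children == null) return names;
    for (FileStatus child : children) {
      if (child.isDir()) {
        names.addAll(listFileNames(fs, child.getPath()));
      } else {
        names.add(child.getPath().getName());
      }
    }
    return names;
  }

  /** True if every file name under storeDir also appears somewhere under archiveDir. */
  static boolean archiveCoversStore(FileSystem fs, Path storeDir, Path archiveDir)
      throws IOException {
    List<String> storeFiles = listFileNames(fs, storeDir);
    List<String> archivedFiles = listFileNames(fs, archiveDir);
    // sorting is not required for containsAll; it just mirrors the test's ordered debug output
    Collections.sort(storeFiles);
    Collections.sort(archivedFiles);
    return archivedFiles.containsAll(storeFiles);
  }
}
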
Added: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java?rev=1364203&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java (added)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java Sun Jul 22 01:11:36 2012
@@ -0,0 +1,365 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.backup.example;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
+import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
+import org.apache.hadoop.hbase.regionserver.CheckedArchivingHFileCleaner;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveTestingUtil;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Spin up a small cluster and check that the HFiles of a region are properly long-term archived as
+ * specified via the {@link ZKTableArchiveClient}.
+ */
+@Category(MediumTests.class)
+public class TestZooKeeperTableArchiveClient {
+
+ private static final Log LOG = LogFactory.getLog(TestZooKeeperTableArchiveClient.class);
+ private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+ private static final String STRING_TABLE_NAME = "test";
+ private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+ private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
+ private static final int numRS = 2;
+ private static final int maxTries = 5;
+ private static final long ttl = 1000;
+ private static ZKTableArchiveClient archivingClient;
+
+ /**
+ * Set up the config for the cluster
+ */
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ setupConf(UTIL.getConfiguration());
+ UTIL.startMiniCluster(numRS);
+ archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), UTIL.getHBaseAdmin()
+ .getConnection());
+ }
+
+ private static void setupConf(Configuration conf) {
+ // disable the ui
+ conf.setInt("hbase.regionserver.info.port", -1);
+ // change the flush size to a small amount, regulating the number of store files
+ conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+ // make sure we get a compaction when doing a load, but keep some
+ // files around in the store
+ conf.setInt("hbase.hstore.compaction.min", 10);
+ conf.setInt("hbase.hstore.compactionThreshold", 10);
+ // block writes if we get to 12 store files
+ conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+ // drop the number of attempts for the hbase admin
+ conf.setInt("hbase.client.retries.number", 1);
+ // set the ttl on the hfiles
+ conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
+ conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
+ CheckedArchivingHFileCleaner.class.getCanonicalName(),
+ TimeToLiveHFileCleaner.class.getCanonicalName(),
+ LongTermArchivingHFileCleaner.class.getCanonicalName());
+ }
+
+ @Before
+ public void setup() throws Exception {
+ UTIL.createTable(TABLE_NAME, TEST_FAM);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ UTIL.deleteTable(TABLE_NAME);
+ // and clean up the archive directory
+ try {
+ UTIL.getTestFileSystem().delete(new Path(UTIL.getDefaultRootDirPath(), ".archive"), true);
+ } catch (IOException e) {
+ LOG.warn("Failure to delete archive directory", e);
+ }
+ // make sure that backups are off for all tables
+ archivingClient.disableHFileBackup();
+ }
+
+ @AfterClass
+ public static void cleanupTest() throws Exception {
+ try {
+ UTIL.shutdownMiniCluster();
+ } catch (Exception e) {
+ LOG.warn("problem shutting down cluster", e);
+ }
+ }
+
+ /**
+ * Test turning on/off archiving
+ */
+ @Test
+ public void testArchivingEnableDisable() throws Exception {
+ // 1. turn on hfile backups
+ LOG.debug("----Starting archiving");
+ archivingClient.enableHFileBackupAsync(TABLE_NAME);
+ assertTrue("Archiving didn't get turned on", archivingClient
+ .getArchivingEnabled(TABLE_NAME));
+
+ // 2. Turn off archiving and make sure it's off
+ archivingClient.disableHFileBackup();
+ assertFalse("Archiving didn't get turned off.", archivingClient.getArchivingEnabled(TABLE_NAME));
+
+ // 3. Check enable/disable on a single table
+ archivingClient.enableHFileBackupAsync(TABLE_NAME);
+ assertTrue("Archiving didn't get turned on", archivingClient
+ .getArchivingEnabled(TABLE_NAME));
+
+ // 4. Turn off archiving and make sure it's off
+ archivingClient.disableHFileBackup(TABLE_NAME);
+ assertFalse("Archiving didn't get turned off for " + STRING_TABLE_NAME,
+ archivingClient.getArchivingEnabled(TABLE_NAME));
+ }
+
+ @Test
+ public void testArchivingOnSingleTable() throws Exception {
+ // turn on hfile retention
+ LOG.debug("----Starting archiving");
+ archivingClient.enableHFileBackupAsync(TABLE_NAME);
+ assertTrue("Archiving didn't get turned on", archivingClient
+ .getArchivingEnabled(TABLE_NAME));
+
+ // get the RS and region serving our table
+ List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+ // make sure we only have 1 region serving this table
+ assertEquals(1, servingRegions.size());
+ HRegion region = servingRegions.get(0);
+
+ // get the parent RS and monitor
+ HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
+ FileSystem fs = hrs.getFileSystem();
+
+ // put some data on the region
+ LOG.debug("-------Loading table");
+ UTIL.loadRegion(region, TEST_FAM);
+ loadAndCompact(region);
+
+ // check that we actually have some store files that were archived
+ Store store = region.getStore(TEST_FAM);
+ Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(),
+ region, store);
+
+ // check to make sure we archived some files
+ assertTrue("Didn't create a store archive directory", fs.exists(storeArchiveDir));
+ assertTrue("No files in the store archive",
+ FSUtils.listStatus(fs, storeArchiveDir, null).length > 0);
+
+ // and then put some non-table files in the archive
+ Configuration conf = UTIL.getConfiguration();
+ Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
+ // write a tmp file to the archive dir
+ Path tmpFile = new Path(archiveDir, "toDelete");
+ FSDataOutputStream out = fs.create(tmpFile);
+ out.write(1);
+ out.close();
+
+ assertTrue(fs.exists(tmpFile));
+ // make sure we wait long enough for the file to expire
+ Thread.sleep(ttl);
+
+ // print current state for comparison
+ FSUtils.logFileSystemState(fs, archiveDir, LOG);
+
+ // run the hfile cleaners now that the tmp file has passed its ttl
+ ensureHFileCleanersRun();
+
+ // check to make sure the right things get deleted
+ assertTrue("Store archive got deleted", fs.exists(storeArchiveDir));
+ assertTrue("Archived HFiles got deleted",
+ FSUtils.listStatus(fs, storeArchiveDir, null).length > 0);
+
+ assertFalse(
+ "Tmp file (non-table archive file) didn't " + "get deleted, archive dir: "
+ + fs.listStatus(archiveDir), fs.exists(tmpFile));
+ LOG.debug("Turning off hfile backup.");
+ // stop archiving the table
+ archivingClient.disableHFileBackup();
+ LOG.debug("Deleting table from archive.");
+ // now remove the archived table
+ Path primaryTable = new Path(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration()),
+ STRING_TABLE_NAME);
+ fs.delete(primaryTable, true);
+ LOG.debug("Deleted primary table, waiting for file cleaners to run");
+ // and make sure the archive directory is retained after a cleanup
+ // have to do this manually since delegates aren't run if there aren't any files in the archive
+ // dir to clean up
+ Thread.sleep(ttl);
+ UTIL.getHBaseCluster().getMaster().getHFileCleaner().triggerNow();
+ Thread.sleep(ttl);
+ LOG.debug("File cleaners done, checking results.");
+ // but we still have the archive directory
+ assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
+ }
+
+ /**
+ * Make sure all the {@link HFileCleaner} delegates have run.
+ * <p>
+ * Blocking operation; retriggers the cleaner and sleeps the ttl until it has run.
+ * @throws InterruptedException
+ */
+ private void ensureHFileCleanersRun() throws InterruptedException {
+ CheckedArchivingHFileCleaner.resetCheck();
+ do {
+ UTIL.getHBaseCluster().getMaster().getHFileCleaner().triggerNow();
+ LOG.debug("Triggered, sleeping an amount until we can pass the check.");
+ Thread.sleep(ttl);
+ } while (!CheckedArchivingHFileCleaner.getChecked());
+ }
+
+ /**
+ * Test archiving/cleaning across multiple tables, where some are retained, and others aren't
+ * @throws Exception
+ */
+ @Test
+ public void testMultipleTables() throws Exception {
+ archivingClient.enableHFileBackupAsync(TABLE_NAME);
+ assertTrue("Archiving didn't get turned on", archivingClient
+ .getArchivingEnabled(TABLE_NAME));
+
+ // create another table that we don't archive
+ String otherTable = "otherTable";
+ UTIL.createTable(Bytes.toBytes(otherTable), TEST_FAM);
+
+ // get the parent RS and monitor
+ FileSystem fs = FileSystem.get(UTIL.getConfiguration());
+
+ // put data in the filesystem of the first table
+ loadAndCompact(STRING_TABLE_NAME);
+ // and some data in the other table
+ loadAndCompact(otherTable);
+
+ // make sure we wait long enough for the other table's files to expire
+ Thread.sleep(ttl);
+ ensureHFileCleanersRun();
+
+ // check to make sure the right things get deleted
+ Path primaryStoreArchive = HFileArchiveTestingUtil.getStoreArchivePath(UTIL, STRING_TABLE_NAME,
+ TEST_FAM);
+ Path otherStoreArchive = HFileArchiveTestingUtil
+ .getStoreArchivePath(UTIL, otherTable, TEST_FAM);
+ // make sure the primary store archive still exists and kept its files
+ assertTrue("Store archive got deleted", fs.exists(primaryStoreArchive));
+ assertTrue("Archived HFiles got deleted",
+ FSUtils.listStatus(fs, primaryStoreArchive, null).length > 0);
+ assertNull("Archived HFiles should have gotten deleted, but didn't",
+ FSUtils.listStatus(fs, otherStoreArchive, null));
+ // sleep again to make sure the other table gets cleaned up
+ Thread.sleep(ttl);
+ ensureHFileCleanersRun();
+ // first pass removes the store archive
+ assertFalse(fs.exists(otherStoreArchive));
+ // second pass removes the region
+ Thread.sleep(ttl);
+ ensureHFileCleanersRun();
+ Path parent = otherStoreArchive.getParent();
+ assertFalse(fs.exists(parent));
+ // third pass removes the table
+ Thread.sleep(ttl);
+ ensureHFileCleanersRun();
+ parent = otherStoreArchive.getParent();
+ assertFalse(fs.exists(parent));
+ // but we still have the archive directory
+ assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
+
+ FSUtils.logFileSystemState(fs, HFileArchiveUtil.getArchivePath(UTIL.getConfiguration()), LOG);
+ UTIL.deleteTable(Bytes.toBytes(otherTable));
+ }
+
+ private void loadAndCompact(String tableName) throws Exception {
+ byte[] table = Bytes.toBytes(tableName);
+ // get the RS and region serving our table
+ List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(table);
+ // make sure we only have 1 region serving this table
+ assertEquals(1, servingRegions.size());
+ HRegion region = servingRegions.get(0);
+
+ // get the parent RS and monitor
+ HRegionServer hrs = UTIL.getRSForFirstRegionInTable(table);
+ FileSystem fs = hrs.getFileSystem();
+
+ // put some data on the region
+ LOG.debug("-------Loading table");
+ UTIL.loadRegion(region, TEST_FAM);
+ loadAndCompact(region);
+
+ // check that we actually have some store files that were archived
+ Store store = region.getStore(TEST_FAM);
+ Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(),
+ region, store);
+
+ // check to make sure we archived some files
+ assertTrue("Didn't create a store archive directory", fs.exists(storeArchiveDir));
+ assertTrue("No files in the store archive",
+ FSUtils.listStatus(fs, storeArchiveDir, null).length > 0);
+ }
+
+ /**
+ * Load the given region and then ensure that it compacts some files
+ */
+ private void loadAndCompact(HRegion region) throws Exception {
+ int tries = 0;
+ Exception last = null;
+ while (tries++ <= maxTries) {
+ try {
+ // load the region with data
+ UTIL.loadRegion(region, TEST_FAM);
+ // and then trigger a compaction to be sure we try to archive
+ compactRegion(region, TEST_FAM);
+ return;
+ } catch (Exception e) {
+ // keep this around in case we fail later
+ last = e;
+ }
+ }
+ throw last;
+ }
+
+ /**
+ * Compact all the store files in a given region.
+ */
+ private void compactRegion(HRegion region, byte[] family) throws IOException {
+ Store store = region.getStores().get(family);
+ store.compactRecentForTesting(store.getStorefiles().size());
+ }
+}
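
The tests in the file above drive archiving end to end through ZKTableArchiveClient. As a rough usage sketch of that client outside the test harness (assumptions: a reachable cluster configuration, an illustrative table name "myTable", and no error handling or cleanup; the class name ArchivingClientSketch is not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.backup.example.ZKTableArchiveClient;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ArchivingClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // the client takes a configuration and a cluster connection, as in the test setup above
    HBaseAdmin admin = new HBaseAdmin(conf);
    ZKTableArchiveClient client = new ZKTableArchiveClient(conf, admin.getConnection());

    byte[] table = Bytes.toBytes("myTable"); // illustrative table name

    // request archiving for the table; the call is asynchronous, so poll the flag
    client.enableHFileBackupAsync(table);
    while (!client.getArchivingEnabled(table)) {
      Thread.sleep(100);
    }

    // ... while enabled, HFiles removed from the table's stores are kept in the archive ...

    // turn archiving back off for just this table
    client.disableHFileBackup(table);
  }
}
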
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1364203&r1=1364202&r2=1364203&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Sun Jul 22 01:11:36 2012
@@ -19,11 +19,12 @@
*/
package org.apache.hadoop.hbase.master;
+import static org.apache.hadoop.hbase.util.HFileArchiveTestingUtil.assertArchiveEqualToOriginal;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
@@ -36,6 +37,8 @@ import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -64,9 +67,12 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
@@ -560,6 +566,161 @@ public class TestCatalogJanitor {
janitor.join();
}
+ @Test
+ public void testArchiveOldRegion() throws Exception {
+ String table = "table";
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ setRootDirAndCleanIt(htu, "testCleanParent");
+ Server server = new MockServer(htu);
+ MasterServices services = new MockMasterServices(server);
+
+ // create the janitor
+ CatalogJanitor janitor = new CatalogJanitor(server, services);
+
+ // Create regions.
+ HTableDescriptor htd = new HTableDescriptor(table);
+ htd.addFamily(new HColumnDescriptor("f"));
+ HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
+ HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
+ HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
+ // Test that when both daughter regions are in place, we do not
+ // remove the parent.
+ List<KeyValue> kvs = new ArrayList<KeyValue>();
+ kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
+ HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
+ kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
+ HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
+ Result r = new Result(kvs);
+
+ FileSystem fs = FileSystem.get(htu.getConfiguration());
+ Path rootdir = services.getMasterFileSystem().getRootDir();
+ // have to set the root directory since we use it in HFileDisposer to figure out how to get to the
+ // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
+ // the single test passes, but when the full suite is run, things break).
+ FSUtils.setRootDir(fs.getConf(), rootdir);
+ Path tabledir = HTableDescriptor.getTableDir(rootdir, htd.getName());
+ Path storedir = Store.getStoreHomedir(tabledir, parent.getEncodedName(),
+ htd.getColumnFamilies()[0].getName());
+
+ // delete the file and ensure that the files have been archived
+ Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
+ tabledir, htd.getColumnFamilies()[0].getName());
+
+ // enable archiving, make sure that files get archived
+ addMockStoreFiles(2, services, storedir);
+ // get the current store files for comparison
+ FileStatus[] storeFiles = fs.listStatus(storedir);
+ for (FileStatus file : storeFiles) {
+ System.out.println("Have store file:" + file.getPath());
+ }
+
+ // do the cleaning of the parent
+ assertTrue(janitor.cleanParent(parent, r));
+
+ // and now check to make sure that the files have actually been archived
+ FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
+ assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
+
+ // cleanup
+ services.stop("Test finished");
+ server.stop("shutdown");
+ janitor.join();
+ }
+
+ /**
+ * Test that archiving a store file with the same name as one already in the archive causes the
+ * previously archived file to be moved aside to a timestamped backup
+ */
+ @Test
+ public void testDuplicateHFileResolution() throws Exception {
+ String table = "table";
+ HBaseTestingUtility htu = new HBaseTestingUtility();
+ setRootDirAndCleanIt(htu, "testCleanParent");
+ Server server = new MockServer(htu);
+ MasterServices services = new MockMasterServices(server);
+
+ // create the janitor
+ CatalogJanitor janitor = new CatalogJanitor(server, services);
+
+ // Create regions.
+ HTableDescriptor htd = new HTableDescriptor(table);
+ htd.addFamily(new HColumnDescriptor("f"));
+ HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
+ HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
+ HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
+ // Test that when both daughter regions are in place, we do not
+ // remove the parent.
+ List<KeyValue> kvs = new ArrayList<KeyValue>();
+ kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
+ HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
+ kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
+ HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
+ Result r = new Result(kvs);
+
+ FileSystem fs = FileSystem.get(htu.getConfiguration());
+
+ Path rootdir = services.getMasterFileSystem().getRootDir();
+ // have to set the root directory since we use it in HFileDisposer to figure out how to get to the
+ // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
+ // the single test passes, but when the full suite is run, things break).
+ FSUtils.setRootDir(fs.getConf(), rootdir);
+ Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
+ Path storedir = Store.getStoreHomedir(tabledir, parent.getEncodedName(),
+ htd.getColumnFamilies()[0].getName());
+ System.out.println("Old root:" + rootdir);
+ System.out.println("Old table:" + tabledir);
+ System.out.println("Old store:" + storedir);
+
+ Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
+ tabledir, htd.getColumnFamilies()[0].getName());
+ System.out.println("Old archive:" + storeArchive);
+
+ // enable archiving, make sure that files get archived
+ addMockStoreFiles(2, services, storedir);
+ // get the current store files for comparison
+ FileStatus[] storeFiles = fs.listStatus(storedir);
+
+ // do the cleaning of the parent
+ assertTrue(janitor.cleanParent(parent, r));
+
+ // and now check to make sure that the files have actually been archived
+ FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
+ assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
+
+ // now add store files with the same names as before to check backup
+ // enable archiving, make sure that files get archived
+ addMockStoreFiles(2, services, storedir);
+
+ // do the cleaning of the parent
+ assertTrue(janitor.cleanParent(parent, r));
+
+ // and now check to make sure that the files have actually been archived
+ archivedStoreFiles = fs.listStatus(storeArchive);
+ assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);
+
+ // cleanup
+ services.stop("Test finished");
+ server.stop("shutdown");
+ janitor.join();
+ }
+
+ private void addMockStoreFiles(int count, MasterServices services, Path storedir)
+ throws IOException {
+ // get the existing store files
+ FileSystem fs = services.getMasterFileSystem().getFileSystem();
+ fs.mkdirs(storedir);
+ // create the store files in the parent
+ for (int i = 0; i < count; i++) {
+ Path storeFile = new Path(storedir, "_store" + i);
+ FSDataOutputStream dos = fs.create(storeFile, true);
+ dos.writeBytes("Some data: " + i);
+ dos.close();
+ }
+ // make sure the mock store files are there
+ FileStatus[] storeFiles = fs.listStatus(storedir);
+ assertEquals(count, storeFiles.length);
+ }
+
private Result makeResultFromHRegionInfo(HRegionInfo region, HRegionInfo splita,
HRegionInfo splitb) throws IOException {
List<KeyValue> kvs = new ArrayList<KeyValue>();