Posted to commits@hbase.apache.org by mb...@apache.org on 2013/03/04 12:24:53 UTC

svn commit: r1452257 [11/14] - in /hbase/branches/0.94: security/src/main/java/org/apache/hadoop/hbase/security/access/ security/src/test/java/org/apache/hadoop/hbase/security/access/ src/main/jamon/org/apache/hadoop/hbase/tmpl/master/ src/main/java/or...

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java Mon Mar  4 11:24:50 2013
@@ -42,6 +42,8 @@ import org.apache.hadoop.hbase.client.HT
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
@@ -92,6 +94,14 @@ public class TestMasterObserver {
     private boolean postStartMasterCalled;
     private boolean startCalled;
     private boolean stopCalled;
+    private boolean preSnapshotCalled;
+    private boolean postSnapshotCalled;
+    private boolean preCloneSnapshotCalled;
+    private boolean postCloneSnapshotCalled;
+    private boolean preRestoreSnapshotCalled;
+    private boolean postRestoreSnapshotCalled;
+    private boolean preDeleteSnapshotCalled;
+    private boolean postDeleteSnapshotCalled;
 
     public void enableBypass(boolean bypass) {
       this.bypass = bypass;
@@ -124,6 +134,14 @@ public class TestMasterObserver {
       postBalanceCalled = false;
       preBalanceSwitchCalled = false;
       postBalanceSwitchCalled = false;
+      preSnapshotCalled = false;
+      postSnapshotCalled = false;
+      preCloneSnapshotCalled = false;
+      postCloneSnapshotCalled = false;
+      preRestoreSnapshotCalled = false;
+      postRestoreSnapshotCalled = false;
+      preDeleteSnapshotCalled = false;
+      postDeleteSnapshotCalled = false;
     }
 
     @Override
@@ -463,10 +481,82 @@ public class TestMasterObserver {
     public boolean wasStarted() { return startCalled; }
 
     public boolean wasStopped() { return stopCalled; }
+
+    @Override
+    public void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+        throws IOException {
+      preSnapshotCalled = true;
+    }
+
+    @Override
+    public void postSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+        throws IOException {
+      postSnapshotCalled = true;
+    }
+
+    public boolean wasSnapshotCalled() {
+      return preSnapshotCalled && postSnapshotCalled;
+    }
+
+    @Override
+    public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+        throws IOException {
+      preCloneSnapshotCalled = true;
+    }
+
+    @Override
+    public void postCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+        throws IOException {
+      postCloneSnapshotCalled = true;
+    }
+
+    public boolean wasCloneSnapshotCalled() {
+      return preCloneSnapshotCalled && postCloneSnapshotCalled;
+    }
+
+    @Override
+    public void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+        throws IOException {
+      preRestoreSnapshotCalled = true;
+    }
+
+    @Override
+    public void postRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
+        throws IOException {
+      postRestoreSnapshotCalled = true;
+    }
+
+    public boolean wasRestoreSnapshotCalled() {
+      return preRestoreSnapshotCalled && postRestoreSnapshotCalled;
+    }
+
+    @Override
+    public void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final SnapshotDescription snapshot) throws IOException {
+      preDeleteSnapshotCalled = true;
+    }
+
+    @Override
+    public void postDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+        final SnapshotDescription snapshot) throws IOException {
+      postDeleteSnapshotCalled = true;
+    }
+
+    public boolean wasDeleteSnapshotCalled() {
+      return preDeleteSnapshotCalled && postDeleteSnapshotCalled;
+    }
   }
 
   private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static byte[] TEST_SNAPSHOT = Bytes.toBytes("observed_snapshot");
   private static byte[] TEST_TABLE = Bytes.toBytes("observed_table");
+  private static byte[] TEST_CLONE = Bytes.toBytes("observed_clone");
   private static byte[] TEST_FAMILY = Bytes.toBytes("fam1");
   private static byte[] TEST_FAMILY2 = Bytes.toBytes("fam2");
 
@@ -475,6 +565,8 @@ public class TestMasterObserver {
     Configuration conf = UTIL.getConfiguration();
     conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
         CPMasterObserver.class.getName());
+    // Enable snapshot support (snapshots are off by default)
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    // We need more than one data server for this test
     UTIL.startMiniCluster(2);
   }
@@ -719,6 +811,63 @@ public class TestMasterObserver {
         cp.wasBalanceCalled());
   }
 
+  @Test
+  public void testSnapshotOperations() throws Exception {
+    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
+    HMaster master = cluster.getMaster();
+    MasterCoprocessorHost host = master.getCoprocessorHost();
+    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
+        CPMasterObserver.class.getName());
+    cp.resetStates();
+
+    // create a table
+    HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
+    htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
+    HBaseAdmin admin = UTIL.getHBaseAdmin();
+
+    // delete the table if it already exists
+    if (admin.tableExists(TEST_TABLE)) {
+      UTIL.deleteTable(TEST_TABLE);
+    }
+
+    admin.createTable(htd);
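+    // The snapshot below is taken while the table is offline, so disable the
+    // table first and verify it really is disabled.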
+    admin.disableTable(TEST_TABLE);
+    assertTrue(admin.isTableDisabled(TEST_TABLE));
+
+    try {
+      // Test snapshot operation
+      assertFalse("Coprocessor should not have been called yet",
+        cp.wasSnapshotCalled());
+      admin.snapshot(TEST_SNAPSHOT, TEST_TABLE);
+      assertTrue("Coprocessor should have been called on snapshot",
+        cp.wasSnapshotCalled());
+
+      // Test clone operation
+      admin.cloneSnapshot(TEST_SNAPSHOT, TEST_CLONE);
+      assertTrue("Coprocessor should have been called on snapshot clone",
+        cp.wasCloneSnapshotCalled());
+      assertFalse("Coprocessor restore should not have been called on snapshot clone",
+        cp.wasRestoreSnapshotCalled());
+      admin.disableTable(TEST_CLONE);
+      assertTrue(admin.isTableDisabled(TEST_CLONE));
+      admin.deleteTable(TEST_CLONE);
+
+      // Test restore operation
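+      // resetStates() clears the clone flags, so the assertions below cannot
+      // be satisfied by the clone calls above.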
+      cp.resetStates();
+      admin.restoreSnapshot(TEST_SNAPSHOT);
+      assertTrue("Coprocessor should have been called on snapshot restore",
+        cp.wasRestoreSnapshotCalled());
+      assertFalse("Coprocessor clone should not have been called on snapshot restore",
+        cp.wasCloneSnapshotCalled());
+
+      admin.deleteSnapshot(TEST_SNAPSHOT);
+      assertTrue("Coprocessor should have been called on snapshot delete",
+        cp.wasDeleteSnapshotCalled());
+    } finally {
+      admin.deleteTable(TEST_TABLE);
+    }
+  }
+
   private void waitForRITtoBeZero(HMaster master) throws IOException {
     // wait for assignments to finish
     AssignmentManager mgr = master.getAssignmentManager();

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionDispatcher.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,123 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test that we propagate errors through a dispatcher exactly once via different failure
+ * injection mechanisms.
+ */
+@Category(SmallTests.class)
+public class TestForeignExceptionDispatcher {
+  private static final Log LOG = LogFactory.getLog(TestForeignExceptionDispatcher.class);
+
+  /**
+   * Exceptions used to inject failures in the tests below.
+   */
+  final ForeignException EXTEXN = new ForeignException("FORTEST", new IllegalArgumentException("FORTEST"));
+  final ForeignException EXTEXN2 = new ForeignException("FORTEST2", new IllegalArgumentException("FORTEST2"));
+
+  /**
+   * Tests that a dispatcher dispatches only the first exception, and does not propagate
+   * subsequent exceptions.
+   */
+  @Test
+  public void testErrorPropagation() {
+    ForeignExceptionListener listener1 = Mockito.mock(ForeignExceptionListener.class);
+    ForeignExceptionListener listener2 = Mockito.mock(ForeignExceptionListener.class);
+    ForeignExceptionDispatcher dispatcher = new ForeignExceptionDispatcher();
+
+    // add the listeners
+    dispatcher.addListener(listener1);
+    dispatcher.addListener(listener2);
+
+    // create an artificial error
+    dispatcher.receive(EXTEXN);
+
+    // make sure the listeners got the error
+    Mockito.verify(listener1, Mockito.times(1)).receive(EXTEXN);
+    Mockito.verify(listener2, Mockito.times(1)).receive(EXTEXN);
+
+    // make sure that we get an exception
+    try {
+      dispatcher.rethrowException();
+      fail("Monitor should have thrown an exception after getting error.");
+    } catch (ForeignException ex) {
+      assertTrue("Got an unexpected exception:" + ex, ex.getCause() == EXTEXN.getCause());
+      LOG.debug("Got the testing exception!");
+    }
+
+    // push another error, which should not be passed to listeners
+    dispatcher.receive(EXTEXN2);
+    Mockito.verify(listener1, Mockito.never()).receive(EXTEXN2);
+    Mockito.verify(listener2, Mockito.never()).receive(EXTEXN2);
+  }
+
+  @Test
+  public void testSingleDispatcherWithTimer() {
+    ForeignExceptionListener listener1 = Mockito.mock(ForeignExceptionListener.class);
+    ForeignExceptionListener listener2 = Mockito.mock(ForeignExceptionListener.class);
+
+    ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();
+
+    // add the listeners
+    monitor.addListener(listener1);
+    monitor.addListener(listener2);
+
+    TimeoutExceptionInjector timer = new TimeoutExceptionInjector(monitor, 1000);
+    timer.start();
+    timer.trigger();
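+    // trigger() fires the timeout task immediately instead of waiting out the
+    // full 1000ms delay.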
+
+    assertTrue("Monitor didn't get timeout", monitor.hasException());
+
+    // verify that we propagated the error
+    Mockito.verify(listener1).receive(Mockito.any(ForeignException.class));
+    Mockito.verify(listener2).receive(Mockito.any(ForeignException.class));
+  }
+
+  /**
+   * Test that the dispatcher can receive an error via the timer mechanism.
+   */
+  @Test
+  public void testAttemptTimer() {
+    ForeignExceptionListener listener1 = Mockito.mock(ForeignExceptionListener.class);
+    ForeignExceptionListener listener2 = Mockito.mock(ForeignExceptionListener.class);
+    ForeignExceptionDispatcher orchestrator = new ForeignExceptionDispatcher();
+
+    // add the listeners
+    orchestrator.addListener(listener1);
+    orchestrator.addListener(listener2);
+
+    // now create a timer and check for that error
+    TimeoutExceptionInjector timer = new TimeoutExceptionInjector(orchestrator, 1000);
+    timer.start();
+    timer.trigger();
+    // make sure that we got the timer error
+    Mockito.verify(listener1, Mockito.times(1)).receive(Mockito.any(ForeignException.class));
+    Mockito.verify(listener2, Mockito.times(1)).receive(Mockito.any(ForeignException.class));
+  }
+}
\ No newline at end of file

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestForeignExceptionSerialization.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * Test that we correctly serialize exceptions from a remote source
+ */
+@Category(SmallTests.class)
+public class TestForeignExceptionSerialization {
+  private static final String srcName = "someNode";
+
+  /**
+   * Verify that we get back similar stack trace information before and after serialization.
+   * @throws InvalidProtocolBufferException
+   */
+  @Test
+  public void testSimpleException() throws InvalidProtocolBufferException {
+    String data = "some bytes";
+    ForeignException in = new ForeignException("SRC", new IllegalArgumentException(data));
+    // check that we get the data back out
+    ForeignException e = ForeignException.deserialize(ForeignException.serialize(srcName, in));
+    assertNotNull(e);
+
+    // now check that we get the right stack trace
+    StackTraceElement elem = new StackTraceElement(this.getClass().toString(), "method", "file", 1);
+    in.setStackTrace(new StackTraceElement[] { elem });
+    e = ForeignException.deserialize(ForeignException.serialize(srcName, in));
+
+    assertNotNull(e);
+    assertEquals("Stack trace got corrupted", elem, e.getCause().getStackTrace()[0]);
+    assertEquals("Got an unexpectedly long stack trace", 1, e.getCause().getStackTrace().length);
+  }
+
+  /**
+   * Verify that a generic exception's stack trace has the same stack trace elements after
+   * serialization and deserialization
+   * @throws InvalidProtocolBufferException
+   */
+  @Test
+  public void testRemoteFromLocal() throws InvalidProtocolBufferException {
+    String errorMsg = "some message";
+    Exception generic = new Exception(errorMsg);
+    generic.printStackTrace();
+    assertTrue(generic.getMessage().contains(errorMsg));
+
+    ForeignException e = ForeignException.deserialize(ForeignException.serialize(srcName, generic));
+    assertArrayEquals("Local stack trace got corrupted", generic.getStackTrace(), e.getCause().getStackTrace());
+
+    e.printStackTrace(); // should have ForeignException and source node in it.
+    assertTrue(e.getCause().getCause() == null);
+
+    // verify that original error message is present in Foreign exception message
+    assertTrue(e.getCause().getMessage().contains(errorMsg));
+  }
+
+}

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/errorhandling/TestTimeoutExceptionInjector.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.errorhandling;
+
+import static org.junit.Assert.fail;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test the {@link TimeoutExceptionInjector} to ensure it fulfills its contracts
+ */
+@Category(SmallTests.class)
+public class TestTimeoutExceptionInjector {
+
+  private static final Log LOG = LogFactory.getLog(TestTimeoutExceptionInjector.class);
+
+  /**
+   * Test that a manually triggered timer fires an exception.
+   */
+  @Test(timeout = 1000)
+  public void testTimerTrigger() {
+    final long time = 10000000; // pick a value that is very far in the future
+    ForeignExceptionListener listener = Mockito.mock(ForeignExceptionListener.class);
+    TimeoutExceptionInjector timer = new TimeoutExceptionInjector(listener, time);
+    timer.start();
+    timer.trigger();
+    Mockito.verify(listener, Mockito.times(1)).receive(Mockito.any(ForeignException.class));
+  }
+
+  /**
+   * Test that a manually triggered exception with data fires with the data in receive().
+   */
+  @Test
+  public void testTimerPassesOnErrorInfo() {
+    final long time = 1000000;
+    ForeignExceptionListener listener = Mockito.mock(ForeignExceptionListener.class);
+    TimeoutExceptionInjector timer = new TimeoutExceptionInjector(listener, time);
+    timer.start();
+    timer.trigger();
+    Mockito.verify(listener).receive(Mockito.any(ForeignException.class));
+  }
+
+  /**
+   * Demonstrate TimeoutExceptionInjector semantics -- completion means no more exceptions are
+   * passed to the error listener.
+   */
+  @Test(timeout = 1000)
+  public void testStartAfterComplete() throws InterruptedException {
+    final long time = 10;
+    ForeignExceptionListener listener = Mockito.mock(ForeignExceptionListener.class);
+    TimeoutExceptionInjector timer = new TimeoutExceptionInjector(listener, time);
+    timer.complete();
+    try {
+      timer.start();
+      fail("Timer should fail to start after complete.");
+    } catch (IllegalStateException e) {
+      LOG.debug("Correctly failed timer: " + e.getMessage());
+    }
+    Thread.sleep(time + 1);
+    Mockito.verifyZeroInteractions(listener);
+  }
+
+  /**
+   * Demonstrate TimeoutExceptionInjector semantics -- triggering fires the exception and completes
+   * the timer.
+   */
+  @Test(timeout = 1000)
+  public void testStartAfterTrigger() throws InterruptedException {
+    final long time = 10;
+    ForeignExceptionListener listener = Mockito.mock(ForeignExceptionListener.class);
+    TimeoutExceptionInjector timer = new TimeoutExceptionInjector(listener, time);
+    timer.trigger();
+    try {
+      timer.start();
+      fail("Timer should fail to start after complete.");
+    } catch (IllegalStateException e) {
+      LOG.debug("Correctly failed timer: " + e.getMessage());
+    }
+    Thread.sleep(time * 2);
+    Mockito.verify(listener, Mockito.times(1)).receive(Mockito.any(ForeignException.class));
+    Mockito.verifyNoMoreInteractions(listener);
+  }
+}

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,244 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.io.FileLink;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Test that FileLink switches between alternate locations
+ * when the current location moves or gets deleted.
+ */
+@Category(MediumTests.class)
+public class TestFileLink {
+  /**
+   * Test, on HDFS, that the FileLink is still readable
+   * even when the current file gets renamed.
+   */
+  @Test
+  public void testHDFSLinkReadDuringRename() throws Exception {
+    HBaseTestingUtility testUtil = new HBaseTestingUtility();
+    Configuration conf = testUtil.getConfiguration();
+    conf.setInt("dfs.blocksize", 1024 * 1024);
+    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
+
+    testUtil.startMiniDFSCluster(1);
+    MiniDFSCluster cluster = testUtil.getDFSCluster();
+    FileSystem fs = cluster.getFileSystem();
+    assertEquals("hdfs", fs.getUri().getScheme());
+
+    try {
+      testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
+    } finally {
+      testUtil.shutdownMiniCluster();
+    }
+  }
+
+  /**
+   * Test, on a local filesystem, that the FileLink is still readable
+   * even when the current file gets renamed.
+   */
+  @Test
+  public void testLocalLinkReadDuringRename() throws IOException {
+    HBaseTestingUtility testUtil = new HBaseTestingUtility();
+    FileSystem fs = testUtil.getTestFileSystem();
+    assertEquals("file", fs.getUri().getScheme());
+    testLinkReadDuringRename(fs, testUtil.getDataTestDir());
+  }
+
+  /**
+   * Test that link is still readable even when the current file gets renamed.
+   */
+  private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
+    Path originalPath = new Path(rootDir, "test.file");
+    Path archivedPath = new Path(rootDir, "archived.file");
+
+    writeSomeData(fs, originalPath, 256 << 20, (byte)2);
+
+    List<Path> files = new ArrayList<Path>();
+    files.add(originalPath);
+    files.add(archivedPath);
+
+    FileLink link = new FileLink(files);
+    FSDataInputStream in = link.open(fs);
+    try {
+      byte[] data = new byte[8192];
+      long size = 0;
+
+      // Read from origin
+      int n = in.read(data);
+      dataVerify(data, n, (byte)2);
+      size += n;
+
+      // Move origin to archive
+      assertFalse(fs.exists(archivedPath));
+      fs.rename(originalPath, archivedPath);
+      assertFalse(fs.exists(originalPath));
+      assertTrue(fs.exists(archivedPath));
+
+      // Try to read to the end
+      while ((n = in.read(data)) > 0) {
+        dataVerify(data, n, (byte)2);
+        size += n;
+      }
+
+      assertEquals(256 << 20, size);
+    } finally {
+      in.close();
+      if (fs.exists(originalPath)) fs.delete(originalPath);
+      if (fs.exists(archivedPath)) fs.delete(archivedPath);
+    }
+  }
+
+  /**
+   * Test that link is still readable even when the current file gets deleted.
+   *
+   * NOTE: This test is valid only on HDFS.
+   * When a file is deleted from a local file-system, it is simply 'unlinked'.
+   * The inode, which contains the file's data, is not deleted until all
+   * processes have finished with it.
+   * In HDFS, once a read request goes past the cached block locations,
+   * a query to the namenode is performed using the filename,
+   * and the deleted file no longer exists (FileNotFoundException).
+   */
+  @Test
+  public void testHDFSLinkReadDuringDelete() throws Exception {
+    HBaseTestingUtility testUtil = new HBaseTestingUtility();
+    Configuration conf = testUtil.getConfiguration();
+    conf.setInt("dfs.blocksize", 1024 * 1024);
+    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
+
+    testUtil.startMiniDFSCluster(1);
+    MiniDFSCluster cluster = testUtil.getDFSCluster();
+    FileSystem fs = cluster.getFileSystem();
+    assertEquals("hdfs", fs.getUri().getScheme());
+
+    try {
+      List<Path> files = new ArrayList<Path>();
+      for (int i = 0; i < 3; i++) {
+        Path path = new Path(String.format("test-data-%d", i));
+        writeSomeData(fs, path, 1 << 20, (byte)i);
+        files.add(path);
+      }
+
+      FileLink link = new FileLink(files);
+      FSDataInputStream in = link.open(fs);
+      try {
+        byte[] data = new byte[8192];
+        int n;
+
+        // Switch to file 1
+        n = in.read(data);
+        dataVerify(data, n, (byte)0);
+        fs.delete(files.get(0));
+        skipBuffer(in, (byte)0);
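+        // Drain the current file's remaining prefetched data; skipBuffer stops
+        // once the link fails over and the byte pattern changes.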
+
+        // Switch to file 2
+        n = in.read(data);
+        dataVerify(data, n, (byte)1);
+        fs.delete(files.get(1));
+        skipBuffer(in, (byte)1);
+
+        // Switch to file 3
+        n = in.read(data);
+        dataVerify(data, n, (byte)2);
+        fs.delete(files.get(2));
+        skipBuffer(in, (byte)2);
+
+        // No more files available
+        try {
+          n = in.read(data);
+          assert(n <= 0);
+        } catch (FileNotFoundException e) {
+          assertTrue(true);
+        }
+      } finally {
+        in.close();
+      }
+    } finally {
+      testUtil.shutdownMiniCluster();
+    }
+  }
+
+  /**
+   * Write at least 'size' bytes with value 'v' into a new file called 'path'.
+   */
+  private void writeSomeData (FileSystem fs, Path path, long size, byte v) throws IOException {
+    byte[] data = new byte[4096];
+    for (int i = 0; i < data.length; i++) {
+      data[i] = v;
+    }
+
+    FSDataOutputStream stream = fs.create(path);
+    try {
+      long written = 0;
+      while (written < size) {
+        stream.write(data, 0, data.length);
+        written += data.length;
+      }
+    } finally {
+      stream.close();
+    }
+  }
+
+  /**
+   * Verify that the first 'n' bytes of 'data' have 'v' as value.
+   */
+  private static void dataVerify(byte[] data, int n, byte v) {
+    for (int i = 0; i < n; ++i) {
+      assertEquals(v, data[i]);
+    }
+  }
+
+  /**
+   * Skip ahead while the stream keeps returning buffers full of value 'v'.
+   * The throw/catch pair is the loop exit: the skip ends either when the data
+   * changes (the link moved on to the next file) or when the current location
+   * has vanished (FileNotFoundException from the namenode).
+   */
+  private static void skipBuffer(FSDataInputStream in, byte v) throws IOException {
+    byte[] data = new byte[8192];
+    try {
+      int n;
+      while ((n = in.read(data)) == data.length) {
+        for (int i = 0; i < data.length; ++i) {
+          if (data[i] != v)
+            throw new Exception("File changed");
+        }
+      }
+    } catch (Exception e) {
+      // expected; see the javadoc above
+    }
+  }
+}

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java?rev=1452257&r1=1452256&r2=1452257&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java Mon Mar  4 11:24:50 2013
@@ -19,6 +19,9 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Map;
@@ -28,19 +31,18 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache.EvictionThread;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.TestSchemaMetrics;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import org.junit.experimental.categories.Category;
-import static org.junit.Assert.*;
-
 /**
  * Tests the concurrent LruBlockCache.<p>
  *
@@ -77,7 +79,6 @@ public class TestLruBlockCache {
 
   @Test
   public void testBackgroundEvictionThread() throws Exception {
-
     long maxSize = 100000;
     long blockSize = calculateBlockSizeDefault(maxSize, 9); // room for 9, will evict
 
@@ -85,6 +86,14 @@ public class TestLruBlockCache {
 
     CachedItem [] blocks = generateFixedBlocks(10, blockSize, "block");
 
+    EvictionThread evictionThread = cache.getEvictionThread();
+    assertTrue(evictionThread != null);
+
+    // Make sure eviction thread has entered run method
+    while (!evictionThread.isEnteringRun()) {
+      Thread.sleep(1);
+    }
+
     // Add all the blocks
     for (CachedItem block : blocks) {
       cache.cacheBlock(block.cacheKey, block);

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test the HFileLink Cleaner.
+ * HFiles with links cannot be deleted until a link is present.
+ */
+@Category(SmallTests.class)
+public class TestHFileLinkCleaner {
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @Test
+  public void testHFileLinkCleaning() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDataTestDir().toString());
+    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
+    Path rootDir = FSUtils.getRootDir(conf);
+    FileSystem fs = FileSystem.get(conf);
+
+    final String tableName = "test-table";
+    final String tableLinkName = "test-link";
+    final String hfileName = "1234567890";
+    final String familyName = "cf";
+
+    HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableName));
+    HRegionInfo hriLink = new HRegionInfo(Bytes.toBytes(tableLinkName));
+
+    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
+    Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
+          tableName, hri.getEncodedName(), familyName);
+    Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
+          tableLinkName, hriLink.getEncodedName(), familyName);
+
+    // Create an hfile directly in the archive dir: <archive>/<table>/<region>/<family>/<hfile>
+    Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
+    fs.mkdirs(familyPath);
+    Path hfilePath = new Path(familyPath, hfileName);
+    fs.createNewFile(hfilePath);
+
+    // Create link to hfile
+    Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
+                                        hriLink.getEncodedName(), familyName);
+    fs.mkdirs(familyLinkPath);
+    HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
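+    // Creating the link also writes a back-reference file under the archived
+    // store directory; the cleaner consults these back references before
+    // deleting an hfile.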
+    Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
+    assertTrue(fs.exists(linkBackRefDir));
+    FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
+    assertEquals(1, backRefs.length);
+    Path linkBackRef = backRefs[0].getPath();
+
+    // Initialize cleaner
+    final long ttl = 1000;
+    conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
+    Server server = new DummyServer();
+    HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);
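+    // 1000 is the chore period in ms; the steps below call cleaner.chore()
+    // directly instead of waiting for the period to elapse.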
+
+    // Link backref cannot be removed
+    cleaner.chore();
+    assertTrue(fs.exists(linkBackRef));
+    assertTrue(fs.exists(hfilePath));
+
+    // Link backref can be removed
+    fs.rename(new Path(rootDir, tableLinkName), new Path(archiveDir, tableLinkName));
+    cleaner.chore();
+    assertFalse("Link should be deleted", fs.exists(linkBackRef));
+
+    // HFile can be removed
+    Thread.sleep(ttl * 2);
+    cleaner.chore();
+    assertFalse("HFile should be deleted", fs.exists(hfilePath));
+
+    // Remove everything
+    for (int i = 0; i < 4; ++i) {
+      Thread.sleep(ttl * 2);
+      cleaner.chore();
+    }
+    assertFalse("HFile should be deleted", fs.exists(new Path(archiveDir, tableName)));
+    assertFalse("Link should be deleted", fs.exists(new Path(archiveDir, tableLinkName)));
+
+    cleaner.interrupt();
+  }
+
+  private static Path getFamilyDirPath (final Path rootDir, final String table,
+    final String region, final String family) {
+    return new Path(new Path(new Path(rootDir, table), region), family);
+  }
+
+  static class DummyServer implements Server {
+
+    @Override
+    public Configuration getConfiguration() {
+      return TEST_UTIL.getConfiguration();
+    }
+
+    @Override
+    public ZooKeeperWatcher getZooKeeper() {
+      try {
+        return new ZooKeeperWatcher(getConfiguration(), "dummy server", this);
+      } catch (IOException e) {
+        e.printStackTrace();
+      }
+      return null;
+    }
+
+    @Override
+    public CatalogTracker getCatalogTracker() {
+      return null;
+    }
+
+    @Override
+    public ServerName getServerName() {
+      return new ServerName("regionserver,60020,000000");
+    }
+
+    @Override
+    public void abort(String why, Throwable e) {}
+
+    @Override
+    public boolean isAborted() {
+      return false;
+    }
+
+    @Override
+    public void stop(String why) {}
+
+    @Override
+    public boolean isStopped() {
+      return false;
+    }
+  }
+}

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,382 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.cleaner;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.snapshot.HSnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
+import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import com.google.common.collect.Lists;
+
+/**
+ * Test the master-related aspects of a snapshot
+ */
+@Category(MediumTests.class)
+public class TestSnapshotFromMaster {
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotFromMaster.class);
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static final int NUM_RS = 2;
+  private static Path rootDir;
+  private static Path snapshots;
+  private static FileSystem fs;
+  private static HMaster master;
+
+  // for hfile archiving test.
+  private static Path archiveDir;
+  private static final String STRING_TABLE_NAME = "test";
+  private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+  private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
+  // refresh the cache every 1/2 second
+  private static final long cacheRefreshPeriod = 500;
+
+  /**
+   * Setup the config for the cluster
+   */
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(NUM_RS);
+    fs = UTIL.getDFSCluster().getFileSystem();
+    master = UTIL.getMiniHBaseCluster().getMaster();
+    rootDir = master.getMasterFileSystem().getRootDir();
+    snapshots = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
+    archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
+  }
+
+  private static void setupConf(Configuration conf) {
+    // disable the ui
+    conf.setInt("hbase.regionsever.info.port", -1);
+    // change the flush size to a small amount, regulating number of store files
+    conf.setInt("hbase.hregion.memstore.flush.size", 25000);
+    // so make sure we get a compaction when doing a load, but keep around some
+    // files in the store
+    conf.setInt("hbase.hstore.compaction.min", 3);
+    conf.setInt("hbase.hstore.compactionThreshold", 5);
+    // block writes if we get to 12 store files
+    conf.setInt("hbase.hstore.blockingStoreFiles", 12);
+    // drop the number of attempts for the hbase admin
+    conf.setInt("hbase.client.retries.number", 1);
+    // Ensure no extra cleaners are on by default (e.g. TimeToLiveHFileCleaner)
+    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
+    conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, "");
+    // Enable snapshot
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    conf.setLong(SnapshotHFileCleaner.HFILE_CACHE_REFRESH_PERIOD_CONF_KEY, cacheRefreshPeriod);
+
+    // prevent aggressive region split
+    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+      ConstantSizeRegionSplitPolicy.class.getName());
+  }
+
+  @Before
+  public void setup() throws Exception {
+    UTIL.createTable(TABLE_NAME, TEST_FAM);
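+    // Clear any snapshot handler left over from a previous test so the
+    // isSnapshotDone checks start from a clean state.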
+    master.getSnapshotManagerForTesting().setSnapshotHandlerForTesting(null);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    UTIL.deleteTable(TABLE_NAME);
+
+    // delete the archive directory, if it exists
+    if (fs.exists(archiveDir)) {
+      if (!fs.delete(archiveDir, true)) {
+        throw new IOException("Couldn't delete archive directory (" + archiveDir
+            + ") for an unknown reason");
+      }
+    }
+
+    // delete the snapshot directory, if it exists
+    if (fs.exists(snapshots)) {
+      if (!fs.delete(snapshots, true)) {
+        throw new IOException("Couldn't delete snapshots directory (" + snapshots
+            + ") for an unknown reason");
+      }
+    }
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      // NOOP;
+    }
+  }
+
+  /**
+   * Test that the contract from the master for checking on a snapshot is valid.
+   * <p>
+   * <ol>
+   * <li>If a snapshot fails with an error, we expect to get the source error.</li>
+   * <li>If there is no snapshot name supplied, we should get an error.</li>
+   * <li>If you ask about a snapshot that hasn't occurred, you should get an error.</li>
+   * </ol>
+   */
+  @Test(timeout = 15000)
+  public void testIsDoneContract() throws Exception {
+
+    String snapshotName = "asyncExpectedFailureTest";
+
+    // check that we get an exception when looking up snapshot where one hasn't happened
+    SnapshotTestingUtils.expectSnapshotDoneException(master, new HSnapshotDescription(),
+      UnknownSnapshotException.class);
+
+    // and that we get the same issue, even if we specify a name
+    SnapshotDescription desc = SnapshotDescription.newBuilder()
+      .setName(snapshotName).build();
+    SnapshotTestingUtils.expectSnapshotDoneException(master, new HSnapshotDescription(desc),
+      UnknownSnapshotException.class);
+
+    // set a mock handler to simulate a snapshot
+    DisabledTableSnapshotHandler mockHandler = Mockito.mock(DisabledTableSnapshotHandler.class);
+    Mockito.when(mockHandler.getException()).thenReturn(null);
+    Mockito.when(mockHandler.getSnapshot()).thenReturn(desc);
+    Mockito.when(mockHandler.isFinished()).thenReturn(Boolean.TRUE);
+
+    master.getSnapshotManagerForTesting().setSnapshotHandlerForTesting(mockHandler);
+
+    // if we do a lookup without a snapshot name, we should fail - you should always know your name
+    SnapshotTestingUtils.expectSnapshotDoneException(master, new HSnapshotDescription(),
+      UnknownSnapshotException.class);
+
+    // then do the lookup for the snapshot and make sure it is reported as done
+    boolean isDone = master.isSnapshotDone(new HSnapshotDescription(desc));
+    assertTrue("Snapshot didn't complete when it should have.", isDone);
+
+    // now try the case where we are looking for a snapshot we didn't take
+    desc = SnapshotDescription.newBuilder().setName("Not A Snapshot").build();
+    SnapshotTestingUtils.expectSnapshotDoneException(master, new HSnapshotDescription(desc),
+      UnknownSnapshotException.class);
+
+    // then write a snapshot to the fs and make sure that we can find it when checking done
+    snapshotName = "completed";
+    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    desc = desc.toBuilder().setName(snapshotName).build();
+    SnapshotDescriptionUtils.writeSnapshotInfo(desc, snapshotDir, fs);
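+    // Writing the descriptor file into the completed-snapshot directory is
+    // enough for the master to report the snapshot as done; no region data
+    // is required for this check.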
+
+    isDone = master.isSnapshotDone(new HSnapshotDescription(desc));
+    assertTrue("Completed, on-disk snapshot not found", isDone);
+  }
+
+  @Test
+  public void testGetCompletedSnapshots() throws Exception {
+    // first check when there are no snapshots
+    List<HSnapshotDescription> snapshots = master.getCompletedSnapshots();
+    assertEquals("Found unexpected number of snapshots", 0, snapshots.size());
+
+    // write one snapshot to the fs
+    String snapshotName = "completed";
+    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build();
+    SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, snapshotDir, fs);
+
+    // check that we get one snapshot
+    snapshots = master.getCompletedSnapshots();
+    assertEquals("Found unexpected number of snapshots", 1, snapshots.size());
+    List<HSnapshotDescription> expected = Lists.newArrayList(new HSnapshotDescription(snapshot));
+    assertEquals("Returned snapshots don't match created snapshots", expected, snapshots);
+
+    // write a second snapshot
+    snapshotName = "completed_two";
+    snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build();
+    SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, snapshotDir, fs);
+    expected.add(new HSnapshotDescription(snapshot));
+
+    // check that we get two snapshots
+    snapshots = master.getCompletedSnapshots();
+    assertEquals("Found unexpected number of snapshots", 2, snapshots.size());
+    assertEquals("Returned snapshots don't match created snapshots", expected, snapshots);
+  }
+
+  @Test
+  public void testDeleteSnapshot() throws Exception {
+
+    String snapshotName = "completed";
+    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName(snapshotName).build();
+
+    try {
+      master.deleteSnapshot(new HSnapshotDescription(snapshot));
+      fail("Master didn't throw exception when attempting to delete snapshot that doesn't exist");
+    } catch (IOException e) {
+      LOG.debug("Correctly failed delete of non-existant snapshot:" + e.getMessage());
+    }
+
+    // write one snapshot to the fs
+    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, snapshotDir, fs);
+
+    // then delete the existing snapshot, which shouldn't cause an exception to be thrown
+    master.deleteSnapshot(new HSnapshotDescription(snapshot));
+  }
+
+  /**
+   * Test that the snapshot hfile archive cleaner works correctly. HFiles that are in snapshots
+   * should be retained, while those that are not in a snapshot should be deleted.
+   * @throws Exception on failure
+   */
+  @Test
+  public void testSnapshotHFileArchiving() throws Exception {
+    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+    // load the table
+    UTIL.loadTable(new HTable(UTIL.getConfiguration(), TABLE_NAME), TEST_FAM);
+
+    // disable the table so we can take a snapshot
+    admin.disableTable(TABLE_NAME);
+
+    // take a snapshot of the table
+    String snapshotName = "snapshot";
+    byte[] snapshotNameBytes = Bytes.toBytes(snapshotName);
+    admin.snapshot(snapshotNameBytes, TABLE_NAME);
+
+    Configuration conf = master.getConfiguration();
+    LOG.info("After snapshot File-System state");
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    // ensure we only have one snapshot
+    SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshotNameBytes, TABLE_NAME);
+
+    // re-enable the table so we can compact the regions
+    admin.enableTable(TABLE_NAME);
+
+    // compact the files so we get some archived files for the table we just snapshotted
+    List<HRegion> regions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+    for (HRegion region : regions) {
+      region.waitForFlushesAndCompactions(); // enable can trigger a compaction, wait for it.
+      region.compactStores();
+    }
+    LOG.info("After compaction File-System state");
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    // make sure the cleaner has run
+    LOG.debug("Running hfile cleaners");
+    ensureHFileCleanersRun();
+    LOG.info("After cleaners File-System state: " + rootDir);
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    // get the snapshot files for the table
+    Path snapshotTable = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    FileStatus[] snapshotHFiles = SnapshotTestingUtils.listHFiles(fs, snapshotTable);
+    // check that the files in the archive contain the ones that we need for the snapshot
+    LOG.debug("Have snapshot hfiles:");
+    for (FileStatus file : snapshotHFiles) {
+      LOG.debug(file.getPath());
+    }
+    // get the archived files for the table
+    Collection<String> files = getArchivedHFiles(archiveDir, rootDir, fs, STRING_TABLE_NAME);
+
+    // and make sure that there is a proper subset
+    for (FileStatus file : snapshotHFiles) {
+      assertTrue("Archived hfiles " + files + " is missing snapshot file:" + file.getPath(),
+        files.contains(file.getPath().getName()));
+    }
+
+    // delete the existing snapshot
+    admin.deleteSnapshot(snapshotNameBytes);
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    // make sure that we don't keep around the hfiles that aren't in a snapshot
+    // make sure we wait long enough to refresh the snapshot hfile
+    List<BaseHFileCleanerDelegate> delegates = UTIL.getMiniHBaseCluster().getMaster()
+        .getHFileCleaner().cleanersChain;
+    for (BaseHFileCleanerDelegate delegate: delegates) {
+      if (delegate instanceof SnapshotHFileCleaner) {
+        ((SnapshotHFileCleaner)delegate).getFileCacheForTesting().triggerCacheRefreshForTesting();
+      }
+    }
+    // run the cleaner again
+    LOG.debug("Running hfile cleaners");
+    ensureHFileCleanersRun();
+    LOG.info("After delete snapshot cleaners run File-System state");
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    files = getArchivedHFiles(archiveDir, rootDir, fs, STRING_TABLE_NAME);
+    assertEquals("Still have some hfiles in the archive, when their snapshot has been deleted.", 0,
+      files.size());
+  }
+
+  /**
+   * @return all the HFiles for a given table that have been archived
+   * @throws IOException if the archived files cannot be listed
+   */
+  private final Collection<String> getArchivedHFiles(Path archiveDir, Path rootDir,
+      FileSystem fs, String tableName) throws IOException {
+    Path tableArchive = new Path(archiveDir, tableName);
+    FileStatus[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, tableArchive);
+    List<String> files = new ArrayList<String>(archivedHFiles.length);
+    LOG.debug("Have archived hfiles: " + tableArchive);
+    for (FileStatus file : archivedHFiles) {
+      LOG.debug(file.getPath());
+      files.add(file.getPath().getName());
+    }
+    // sort the archived files
+    Collections.sort(files);
+    return files;
+  }
+
+  /**
+   * Make sure the {@link HFileCleaner HFileCleaners} run at least once
+   */
+  private static void ensureHFileCleanersRun() {
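+    // chore() runs one synchronous pass of the master's hfile cleaner chain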
+    UTIL.getHBaseCluster().getMaster().getHFileCleaner().chore();
+  }
+}

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,230 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that we correctly reload the cache, filter directories, etc.
+ */
+@Category(MediumTests.class)
+public class TestSnapshotFileCache {
+
+  private static final Log LOG = LogFactory.getLog(TestSnapshotFileCache.class);
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static FileSystem fs;
+  private static Path rootDir;
+
+  @BeforeClass
+  public static void startCluster() throws Exception {
+    UTIL.startMiniDFSCluster(1);
+    fs = UTIL.getDFSCluster().getFileSystem();
+    rootDir = UTIL.getDefaultRootDirPath();
+  }
+
+  @AfterClass
+  public static void stopCluster() throws Exception {
+    UTIL.shutdownMiniDFSCluster();
+  }
+
+  @After
+  public void cleanupFiles() throws Exception {
+    // cleanup the snapshot directory
+    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
+    fs.delete(snapshotDir, true);
+  }
+
+  @Test(timeout = 10000000)
+  public void testLoadAndDelete() throws Exception {
+    // don't refresh the cache unless we tell it to
+    long period = Long.MAX_VALUE;
+    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
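+    // the two long constructor arguments are presumably the cache refresh period and the
+    // refresh delay; MAX_VALUE keeps the cache from refreshing until we trigger it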
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+
+    Path snapshot = new Path(snapshotDir, "snapshot");
+    Path region = new Path(snapshot, "7e91021");
+    Path family = new Path(region, "fam");
+    Path file1 = new Path(family, "file1");
+    Path file2 = new Path(family, "file2");
+
+    // create two hfiles under the snapshot
+    fs.create(file1);
+    fs.create(file2);
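+    // note: fs.create() leaves an output stream open; the test only needs the paths to exist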
+
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    // then make sure the cache finds them
+    assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
+    assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
+    String not = "file-shouldn't-be-found";
+    assertFalse("Cache found '" + not + "', but it shouldn't have.", cache.contains(not));
+
+    // make sure we get a little bit of separation in the modification times
+    // it's okay if we sleep a little longer (e.g. because of a GC pause), as long as we sleep at all
+    Thread.sleep(10);
+
+    LOG.debug("Deleting snapshot.");
+    // then delete the snapshot and make sure that we can still find the files
+    if (!fs.delete(snapshot, true)) {
+      throw new IOException("Couldn't delete " + snapshot + " for an unknown reason.");
+    }
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    LOG.debug("Checking to see if file is deleted.");
+    assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
+    assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
+
+    // then trigger a refresh
+    cache.triggerCacheRefreshForTesting();
+    // and now it shouldn't find those files
+    assertFalse("Cache found '" + file1 + "', but it shouldn't have.",
+      cache.contains(file1.getName()));
+    assertFalse("Cache found '" + file2 + "', but it shouldn't have.",
+      cache.contains(file2.getName()));
+
+    fs.delete(snapshotDir, true);
+  }
+
+  @Test
+  public void testLoadsTmpDir() throws Exception {
+    // don't refresh the cache unless we tell it to
+    long period = Long.MAX_VALUE;
+    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+
+    // create a file in a 'completed' snapshot
+    Path snapshot = new Path(snapshotDir, "snapshot");
+    Path region = new Path(snapshot, "7e91021");
+    Path family = new Path(region, "fam");
+    Path file1 = new Path(family, "file1");
+    fs.create(file1);
+
+    // create an 'in progress' snapshot
+    SnapshotDescription desc = SnapshotDescription.newBuilder().setName("working").build();
+    snapshot = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
+    region = new Path(snapshot, "7e91021");
+    family = new Path(region, "fam");
+    Path file2 = new Path(family, "file2");
+    fs.create(file2);
+
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    // then make sure the cache finds both files
+    assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
+    assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
+  }
+
+  @Test
+  public void testJustFindLogsDirectory() throws Exception {
+    // don't refresh the cache unless we tell it to
+    long period = Long.MAX_VALUE;
+    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFileCache.SnapshotFileInspector() {
+            public Collection<String> filesUnderSnapshot(final Path snapshotDir)
+                throws IOException {
+              return SnapshotReferenceUtil.getHLogNames(fs, snapshotDir);
+            }
+        });
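+    // this inspector reports only HLog names, so hfiles under the snapshot should be
+    // invisible to the cache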
+
+    // create a file in a 'completed' snapshot
+    Path snapshot = new Path(snapshotDir, "snapshot");
+    Path region = new Path(snapshot, "7e91021");
+    Path family = new Path(region, "fam");
+    Path file1 = new Path(family, "file1");
+    fs.create(file1);
+
+    // and another file in the logs directory
+    Path logs = TakeSnapshotUtils.getSnapshotHLogsDir(snapshot, "server");
+    Path log = new Path(logs, "me.hbase.com%2C58939%2C1350424310315.1350424315552");
+    fs.create(log);
+
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    // then make sure the cache only finds the log files
+    assertFalse("Cache found '" + file1 + "', but it shouldn't have.",
+      cache.contains(file1.getName()));
+    assertTrue("Cache didn't find:" + log, cache.contains(log.getName()));
+  }
+
+  @Test
+  public void testReloadModifiedDirectory() throws IOException {
+    // don't refresh the cache unless we tell it to
+    long period = Long.MAX_VALUE;
+    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
+    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
+        "test-snapshot-file-cache-refresh", new SnapshotFiles());
+
+    Path snapshot = new Path(snapshotDir, "snapshot");
+    Path region = new Path(snapshot, "7e91021");
+    Path family = new Path(region, "fam");
+    Path file1 = new Path(family, "file1");
+    Path file2 = new Path(family, "file2");
+
+    // create two hfiles under the snapshot
+    fs.create(file1);
+    fs.create(file2);
+
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+
+    assertTrue("Cache didn't find " + file1, cache.contains(file1.getName()));
+
+    // now delete the snapshot and add a file with a different name
+    fs.delete(snapshot, true);
+    Path file3 = new Path(family, "new_file");
+    fs.create(file3);
+
+    FSUtils.logFileSystemState(fs, rootDir, LOG);
+    assertTrue("Cache didn't find new file:" + file3, cache.contains(file3.getName()));
+  }
+
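+  /** Inspector that returns every HLog and HFile name referenced under a snapshot dir. */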
+  class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector {
+    public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException {
+      Collection<String> files =  new HashSet<String>();
+      files.addAll(SnapshotReferenceUtil.getHLogNames(fs, snapshotDir));
+      files.addAll(SnapshotReferenceUtil.getHFileNames(fs, snapshotDir));
+      return files;
+    }
+  }
+}

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that the snapshot hfile cleaner finds hfiles referenced in a snapshot
+ */
+@Category(SmallTests.class)
+public class TestSnapshotHFileCleaner {
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @AfterClass
+  public static void cleanup() throws IOException {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    Path rootDir = FSUtils.getRootDir(conf);
+    FileSystem fs = FileSystem.get(conf);
+    // cleanup
+    fs.delete(rootDir, true);
+  }
+
+  @Test
+  public void testFindsSnapshotFilesWhenCleaning() throws IOException {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
+    Path rootDir = FSUtils.getRootDir(conf);
+    Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+
+    FileSystem fs = FileSystem.get(conf);
+    SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
+    cleaner.setConf(conf);
+
+    // write an hfile to the snapshot directory
+    String snapshotName = "snapshot";
+    String table = "table";
+    byte[] tableName = Bytes.toBytes(table);
+    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    HRegionInfo mockRegion = new HRegionInfo(tableName);
+    Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
+    Path familyDir = new Path(regionSnapshotDir, "family");
+    // create a reference to a supposedly valid hfile
+    String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
+    Path refFile = new Path(familyDir, hfile);
+
+    // make sure the reference file exists
+    fs.create(refFile);
+
+    // create the hfile in the archive
+    fs.mkdirs(archivedHfileDir);
+    fs.createNewFile(new Path(archivedHfileDir, hfile));
+
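+    // the cleaner matches candidates by file name against the snapshot's referenced files,
+    // so the archived copy must be retained while the snapshot exists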
+    // make sure that the file isn't deletable
+    assertFalse(cleaner.isFileDeletable(new Path(hfile)));
+  }
+}
\ No newline at end of file

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotLogCleaner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotLogCleaner.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotLogCleaner.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotLogCleaner.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that the snapshot log cleaner finds logs referenced in a snapshot
+ */
+@Category(SmallTests.class)
+public class TestSnapshotLogCleaner {
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @AfterClass
+  public static void cleanup() throws IOException {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    Path rootDir = FSUtils.getRootDir(conf);
+    FileSystem fs = FileSystem.get(conf);
+    // cleanup
+    fs.delete(rootDir, true);
+  }
+
+  @Test
+  public void testFindsSnapshotFilesWhenCleaning() throws IOException {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
+    Path rootDir = FSUtils.getRootDir(conf);
+    FileSystem fs = FileSystem.get(conf);
+    SnapshotLogCleaner cleaner = new SnapshotLogCleaner();
+    cleaner.setConf(conf);
+
+    // create a log file reference under the snapshot directory
+    String snapshotName = "snapshot";
+    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
+    Path snapshotLogDir = new Path(snapshotDir, HConstants.HREGION_LOGDIR_NAME);
+    String timestamp = "1339643343027";
+    String hostFromMaster = "localhost%2C59648%2C1339643336601";
+
+    Path hostSnapshotLogDir = new Path(snapshotLogDir, hostFromMaster);
+    String snapshotlogfile = hostFromMaster + "." + timestamp + ".hbase";
+
+    // add the reference to log in the snapshot
+    fs.create(new Path(hostSnapshotLogDir, snapshotlogfile));
+
+    // now check to see if that log file would get deleted.
+    Path oldlogDir = new Path(rootDir, ".oldlogs");
+    Path logFile = new Path(oldlogDir, snapshotlogfile);
+    fs.create(logFile);
+
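+    // the log cleaner keeps any .oldlogs entry whose name matches a log referenced by a
+    // snapshot, so this file must survive cleaning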
+    // make sure that the file isn't deletable
+    assertFalse(cleaner.isFileDeletable(logFile));
+  }
+}
\ No newline at end of file

Added: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java?rev=1452257&view=auto
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java (added)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java Mon Mar  4 11:24:50 2013
@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
+import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
+import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test basic snapshot manager functionality
+ */
+@Category(SmallTests.class)
+public class TestSnapshotManager {
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  MasterServices services = Mockito.mock(MasterServices.class);
+  ProcedureCoordinator coordinator = Mockito.mock(ProcedureCoordinator.class);
+  ExecutorService pool = Mockito.mock(ExecutorService.class);
+  MasterFileSystem mfs = Mockito.mock(MasterFileSystem.class);
+  FileSystem fs;
+  {
+    try {
+      fs = UTIL.getTestFileSystem();
+    } catch (IOException e) {
+      throw new RuntimeException("Couldn't get test filesystem", e);
+    }
+  }
+
+  private SnapshotManager getNewManager() throws IOException, KeeperException {
+    return getNewManager(UTIL.getConfiguration());
+  }
+
+  private SnapshotManager getNewManager(final Configuration conf)
+      throws IOException, KeeperException {
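+    // wire the mocked master services to hand back our test configuration and filesystem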
+    Mockito.reset(services);
+    Mockito.when(services.getConfiguration()).thenReturn(conf);
+    Mockito.when(services.getMasterFileSystem()).thenReturn(mfs);
+    Mockito.when(mfs.getFileSystem()).thenReturn(fs);
+    Mockito.when(mfs.getRootDir()).thenReturn(UTIL.getDataTestDir());
+    return new SnapshotManager(services, coordinator, pool);
+  }
+
+  @Test
+  public void testInProcess() throws KeeperException, IOException {
+    SnapshotManager manager = getNewManager();
+    TakeSnapshotHandler handler = Mockito.mock(TakeSnapshotHandler.class);
+    assertFalse("Manager is in process when there is no current handler", manager.isTakingSnapshot());
+    manager.setSnapshotHandlerForTesting(handler);
+    Mockito.when(handler.isFinished()).thenReturn(false);
+    assertTrue("Manager isn't in process when handler is running", manager.isTakingSnapshot());
+    Mockito.when(handler.isFinished()).thenReturn(true);
+    assertFalse("Manager is process when handler isn't running", manager.isTakingSnapshot());
+  }
+
+  /**
+   * Verify the snapshot support based on the configuration.
+   */
+  @Test
+  public void testSnapshotSupportConfiguration() throws Exception {
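+    // note: SnapshotManager.HBASE_SNAPSHOT_ENABLED is the switch an operator would set
+    // (presumably "hbase.snapshot.enabled") in hbase-site.xml to turn the feature on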
+    // No configuration (no cleaners, not enabled): snapshot feature disabled
+    Configuration conf = new Configuration();
+    SnapshotManager manager = getNewManager(conf);
+    assertFalse("Snapshot should be disabled with no configuration", isSnapshotSupported(manager));
+
+    // force snapshot feature to be enabled
+    conf = new Configuration();
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    manager = getNewManager(conf);
+    assertTrue("Snapshot should be enabled", isSnapshotSupported(manager));
+
+    // force snapshot feature to be disabled
+    conf = new Configuration();
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, false);
+    manager = getNewManager(conf);
+    assertFalse("Snapshot should be disabled", isSnapshotSupported(manager));
+
+    // force snapshot feature to be disabled, even if cleaners are present
+    conf = new Configuration();
+    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
+      SnapshotHFileCleaner.class.getName(), HFileLinkCleaner.class.getName());
+    conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, SnapshotLogCleaner.class.getName());
+    conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, false);
+    manager = getNewManager(conf);
+    assertFalse("Snapshot should be disabled", isSnapshotSupported(manager));
+
+    // cleaners are present, but missing snapshot enabled property
+    conf = new Configuration();
+    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
+      SnapshotHFileCleaner.class.getName(), HFileLinkCleaner.class.getName());
+    conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, SnapshotLogCleaner.class.getName());
+    manager = getNewManager(conf);
+    assertTrue("Snapshot should be enabled, because cleaners are present",
+      isSnapshotSupported(manager));
+
+    // Create a "test snapshot"
+    Path rootDir = UTIL.getDataTestDir();
+    Path testSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(
+      "testSnapshotSupportConfiguration", rootDir);
+    fs.mkdirs(testSnapshotDir);
+    try {
+      // force snapshot feature to be disabled, but snapshots are present
+      conf = new Configuration();
+      conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, false);
+      manager = getNewManager(conf);
+      fail("Master should not start when snapshot is disabled, but snapshots are present");
+    } catch (UnsupportedOperationException e) {
+      // expected
+    } finally {
+      fs.delete(testSnapshotDir, true);
+    }
+  }
+
+  private boolean isSnapshotSupported(final SnapshotManager manager) {
+    try {
+      manager.checkSnapshotSupport();
+      return true;
+    } catch (UnsupportedOperationException e) {
+      return false;
+    }
+  }
+}