Posted to commits@hbase.apache.org by jm...@apache.org on 2013/02/13 19:37:34 UTC

svn commit: r1445814 [3/3] - in /hbase/branches/hbase-7290/hbase-server/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/client/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/master/snapshot/ main/...

Added: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.zookeeper.KeeperException;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test basic snapshot manager functionality
+ */
+@Category(SmallTests.class)
+public class TestSnapshotManager {
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  MasterServices services = Mockito.mock(MasterServices.class);
+  ExecutorService pool = Mockito.mock(ExecutorService.class);
+  MasterFileSystem mfs = Mockito.mock(MasterFileSystem.class);
+  FileSystem fs;
+  {
+    try {
+      fs = UTIL.getTestFileSystem();
+    } catch (IOException e) {
+      throw new RuntimeException("Couldn't get test filesystem", e);
+    }
+  }
+
+  private SnapshotManager getNewManager() throws KeeperException, IOException {
+    Mockito.reset(services);
+    Mockito.when(services.getConfiguration()).thenReturn(UTIL.getConfiguration());
+    Mockito.when(services.getMasterFileSystem()).thenReturn(mfs);
+    Mockito.when(mfs.getFileSystem()).thenReturn(fs);
+    Mockito.when(mfs.getRootDir()).thenReturn(UTIL.getDataTestDir());
+    return new SnapshotManager(services);
+  }
+
+  @Test
+  public void testInProcess() throws KeeperException, IOException {
+    SnapshotManager manager = getNewManager();
+    TakeSnapshotHandler handler = Mockito.mock(TakeSnapshotHandler.class);
+    assertFalse("Manager is in process when there is no current handler", manager.isTakingSnapshot());
+    manager.setSnapshotHandlerForTesting(handler);
+    Mockito.when(handler.isFinished()).thenReturn(false);
+    assertTrue("Manager isn't in process when handler is running", manager.isTakingSnapshot());
+    Mockito.when(handler.isFinished()).thenReturn(true);
+    assertFalse("Manager is process when handler isn't running", manager.isTakingSnapshot());
+  }
+}
\ No newline at end of file

Modified: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java?rev=1445814&r1=1445813&r2=1445814&view=diff
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java (original)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java Wed Feb 13 18:37:32 2013
@@ -44,8 +44,6 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.server.snapshot.TakeSnapshotUtils;
-import org.apache.hadoop.hbase.snapshot.exception.HBaseSnapshotException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;

Added: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestCopyRecoveredEditsTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestCopyRecoveredEditsTask.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestCopyRecoveredEditsTask.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestCopyRecoveredEditsTask.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.snapshot.CopyRecoveredEditsTask;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test that we correctly copy the recovered edits from a directory
+ */
+@Category(SmallTests.class)
+public class TestCopyRecoveredEditsTask {
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @Test
+  public void testCopyFiles() throws Exception {
+
+    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
+    ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
+    FileSystem fs = UTIL.getTestFileSystem();
+    Path root = UTIL.getDataTestDir();
+    String regionName = "regionA";
+    Path regionDir = new Path(root, regionName);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
+
+    try {
+      // doesn't really matter where the region's snapshot directory is, but this is pretty close
+      Path snapshotRegionDir = new Path(workingDir, regionName);
+      fs.mkdirs(snapshotRegionDir);
+
+      // put some stuff in the recovered.edits directory
+      Path edits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
+      fs.mkdirs(edits);
+      // make a file with some data
+      Path file1 = new Path(edits, "0000000000000002352");
+      FSDataOutputStream out = fs.create(file1);
+      byte[] data = new byte[] { 1, 2, 3, 4 };
+      out.write(data);
+      out.close();
+      // make an empty file
+      Path empty = new Path(edits, "empty");
+      fs.createNewFile(empty);
+
+      CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir,
+          snapshotRegionDir);
+      CopyRecoveredEditsTask taskSpy = Mockito.spy(task);
+      taskSpy.call();
+
+      Path snapshotEdits = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir);
+      FileStatus[] snapshotEditFiles = FSUtils.listStatus(fs, snapshotEdits);
+      assertEquals("Got wrong number of files in the snapshot edits", 1, snapshotEditFiles.length);
+      FileStatus file = snapshotEditFiles[0];
+      assertEquals("Didn't copy expected file", file1.getName(), file.getPath().getName());
+
+      Mockito.verify(monitor, Mockito.never()).receive(Mockito.any(ForeignException.class));
+      Mockito.verify(taskSpy, Mockito.never()).snapshotFailure(Mockito.anyString(),
+           Mockito.any(Exception.class));
+
+    } finally {
+      // cleanup the working directory
+      FSUtils.delete(fs, regionDir, true);
+      FSUtils.delete(fs, workingDir, true);
+    }
+  }
+
+  /**
+   * Check that we don't get an exception if there is no recovered edits directory to copy
+   * @throws Exception on failure
+   */
+  @Test
+  public void testNoEditsDir() throws Exception {
+    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
+    ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
+    FileSystem fs = UTIL.getTestFileSystem();
+    Path root = UTIL.getDataTestDir();
+    String regionName = "regionA";
+    Path regionDir = new Path(root, regionName);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
+    try {
+      // doesn't really matter where the region's snapshot directory is, but this is pretty close
+      Path snapshotRegionDir = new Path(workingDir, regionName);
+      fs.mkdirs(snapshotRegionDir);
+      Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
+      assertFalse("Edits dir exists already - it shouldn't", fs.exists(regionEdits));
+
+      CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir,
+          snapshotRegionDir);
+      task.call();
+    } finally {
+      // cleanup the working directory
+      FSUtils.delete(fs, regionDir, true);
+      FSUtils.delete(fs, workingDir, true);
+    }
+  }
+}
\ No newline at end of file
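
The two tests above pin down the expected behavior: non-empty files under a region's
recovered.edits directory are copied into the snapshot's region directory, while empty files
and a missing edits directory are silently skipped. Below is a minimal sketch of that copy
loop using only the Hadoop FileSystem API; the helper name copyNonEmptyEdits and the
skip-empty rule are assumptions read off the assertions above, not the actual
CopyRecoveredEditsTask implementation.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class RecoveredEditsCopySketch {
  /** Copy every non-empty file from srcEditsDir into dstEditsDir (hypothetical helper). */
  static void copyNonEmptyEdits(FileSystem fs, Path srcEditsDir, Path dstEditsDir,
      Configuration conf) throws IOException {
    if (!fs.exists(srcEditsDir)) {
      return; // matches testNoEditsDir: a missing edits directory is not an error
    }
    FileStatus[] edits = fs.listStatus(srcEditsDir);
    if (edits == null || edits.length == 0) {
      return; // nothing to copy
    }
    fs.mkdirs(dstEditsDir);
    for (FileStatus edit : edits) {
      if (edit.getLen() <= 0) {
        continue; // matches testCopyFiles: the empty file is not copied
      }
      Path target = new Path(dstEditsDir, edit.getPath().getName());
      FileUtil.copy(fs, edit.getPath(), fs, target, false, conf); // keep the source file
    }
  }
}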

Added: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,255 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
+import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.mapreduce.Job;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test Export Snapshot Tool
+ */
+@Category(MediumTests.class)
+public class TestExportSnapshot {
+  private final Log LOG = LogFactory.getLog(getClass());
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private final static byte[] FAMILY = Bytes.toBytes("cf");
+
+  private byte[] snapshotName;
+  private byte[] tableName;
+  private HBaseAdmin admin;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
+    TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  /**
+   * Create a table and take a snapshot of the table used by the export test.
+   */
+  @Before
+  public void setUp() throws Exception {
+    this.admin = TEST_UTIL.getHBaseAdmin();
+
+    long tid = System.currentTimeMillis();
+    tableName = Bytes.toBytes("testtb-" + tid);
+    snapshotName = Bytes.toBytes("snaptb0-" + tid);
+
+    // create Table
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor(FAMILY));
+    admin.createTable(htd, null);
+    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    TEST_UTIL.loadTable(table, FAMILY);
+
+    // take a snapshot
+    admin.disableTable(tableName);
+    admin.snapshot(snapshotName, tableName);
+    admin.enableTable(tableName);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    this.admin.close();
+  }
+
+  /**
+   * Verify the result of the getBalancedSplits() method.
+   * The result is a set of groups of files, used as the input lists for the "export" mappers.
+   * All the groups should hold a similar amount of data.
+   *
+   * The input list is a list of (file path, length) pairs.
+   * getBalancedSplits() sorts it by length and assigns one file at a time to each group,
+   * going back and forth through the groups.
+   */
+  @Test
+  public void testBalanceSplit() throws Exception {
+    // Create a list of files
+    List<Pair<Path, Long>> files = new ArrayList<Pair<Path, Long>>();
+    for (long i = 0; i <= 20; i++) {
+      files.add(new Pair<Path, Long>(new Path("file-" + i), i));
+    }
+
+    // Create 5 groups (total size 210)
+    //    group 0: 20, 11, 10,  1,  0 (total size: 42)
+    //    group 1: 19, 12,  9,  2     (total size: 42)
+    //    group 2: 18, 13,  8,  3     (total size: 42)
+    //    group 3: 17, 14,  7,  4     (total size: 42)
+    //    group 4: 16, 15,  6,  5     (total size: 42)
+    List<List<Path>> splits = ExportSnapshot.getBalancedSplits(files, 5);
+    assertEquals(5, splits.size());
+    assertEquals(Arrays.asList(new Path("file-20"), new Path("file-11"),
+      new Path("file-10"), new Path("file-1"), new Path("file-0")), splits.get(0));
+    assertEquals(Arrays.asList(new Path("file-19"), new Path("file-12"),
+      new Path("file-9"), new Path("file-2")), splits.get(1));
+    assertEquals(Arrays.asList(new Path("file-18"), new Path("file-13"),
+      new Path("file-8"), new Path("file-3")), splits.get(2));
+    assertEquals(Arrays.asList(new Path("file-17"), new Path("file-14"),
+      new Path("file-7"), new Path("file-4")), splits.get(3));
+    assertEquals(Arrays.asList(new Path("file-16"), new Path("file-15"),
+      new Path("file-6"), new Path("file-5")), splits.get(4));
+  }
+
+  /**
+   * Verify that the exported snapshot and the copied files match the originals.
+   */
+  @Test
+  public void testExportFileSystemState() throws Exception {
+    Path copyDir = TEST_UTIL.getDataTestDir("export-" + System.currentTimeMillis());
+    URI hdfsUri = FileSystem.get(TEST_UTIL.getConfiguration()).getUri();
+    FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
+    copyDir = copyDir.makeQualified(fs);
+
+    // Export Snapshot
+    int res = ExportSnapshot.innerMain(TEST_UTIL.getConfiguration(), new String[] {
+      "-snapshot", Bytes.toString(snapshotName),
+      "-copy-to", copyDir.toString()
+    });
+    assertEquals(0, res);
+
+    // Verify File-System state
+    FileStatus[] rootFiles = fs.listStatus(copyDir);
+    assertEquals(2, rootFiles.length);
+    for (FileStatus fileStatus: rootFiles) {
+      String name = fileStatus.getPath().getName();
+      assertTrue(fileStatus.isDir());
+      assertTrue(name.equals(".snapshot") || name.equals(".archive"));
+    }
+
+    // compare the snapshot metadata and verify the hfiles
+    final FileSystem hdfs = FileSystem.get(hdfsUri, TEST_UTIL.getConfiguration());
+    final Path snapshotDir = new Path(".snapshot", Bytes.toString(snapshotName));
+    verifySnapshot(hdfs, new Path(TEST_UTIL.getDefaultRootDirPath(), snapshotDir),
+        fs, new Path(copyDir, snapshotDir));
+    verifyArchive(fs, copyDir, Bytes.toString(snapshotName));
+
+    // Remove the exported dir
+    fs.delete(copyDir, true);
+  }
+
+  /*
+   * Verify that the snapshot folder on file-system 1 matches the one on file-system 2
+   */
+  private void verifySnapshot(final FileSystem fs1, final Path root1,
+      final FileSystem fs2, final Path root2) throws IOException {
+    assertEquals(listFiles(fs1, root1, root1), listFiles(fs2, root2, root2));
+  }
+
+  /*
+   * Verify that the referenced files exist and are non-empty
+   */
+  private void verifyArchive(final FileSystem fs, final Path rootDir, final String snapshotName)
+      throws IOException {
+    final Path exportedSnapshot = new Path(rootDir, new Path(".snapshot", snapshotName));
+    final Path exportedArchive = new Path(rootDir, ".archive");
+    LOG.debug(listFiles(fs, exportedArchive, exportedArchive));
+    SnapshotReferenceUtil.visitReferencedFiles(fs, exportedSnapshot,
+        new SnapshotReferenceUtil.FileVisitor() {
+        public void storeFile (final String region, final String family, final String hfile)
+            throws IOException {
+          verifyNonEmptyFile(new Path(exportedArchive,
+            new Path(Bytes.toString(tableName), new Path(region, new Path(family, hfile)))));
+        }
+
+        public void recoveredEdits (final String region, final String logfile)
+            throws IOException {
+          verifyNonEmptyFile(new Path(exportedSnapshot,
+            new Path(Bytes.toString(tableName), new Path(region, logfile))));
+        }
+
+        public void logFile (final String server, final String logfile)
+            throws IOException {
+          verifyNonEmptyFile(new Path(exportedSnapshot, new Path(server, logfile)));
+        }
+
+        private void verifyNonEmptyFile(final Path path) throws IOException {
+          LOG.debug(path);
+          assertTrue(fs.exists(path));
+          assertTrue(fs.getFileStatus(path).getLen() > 0);
+        }
+    });
+  }
+
+  private Set<String> listFiles(final FileSystem fs, final Path root, final Path dir)
+      throws IOException {
+    Set<String> files = new HashSet<String>();
+    int rootPrefix = root.toString().length();
+    FileStatus[] list = FSUtils.listStatus(fs, dir);
+    if (list != null) {
+      for (FileStatus fstat: list) {
+        LOG.debug(fstat.getPath());
+        if (fstat.isDir()) {
+          files.addAll(listFiles(fs, root, fstat.getPath()));
+        } else {
+          files.add(fstat.getPath().toString().substring(rootPrefix));
+        }
+      }
+    }
+    return files;
+  }
+}
+
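
The testBalanceSplit case above nails down the dealing order described in its javadoc: files
are sorted by length, largest first, and handed out to the groups in a serpentine
(back-and-forth) order so each group ends up with roughly the same total size. The following
is a standalone sketch of that strategy which reproduces the grouping asserted above; it is
an illustration of the described approach, not the actual ExportSnapshot.getBalancedSplits()
code.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.Pair;

public class BalancedSplitSketch {
  static List<List<Path>> balance(List<Pair<Path, Long>> files, int ngroups) {
    // sort the (path, length) pairs by length, largest first
    List<Pair<Path, Long>> sorted = new ArrayList<Pair<Path, Long>>(files);
    Collections.sort(sorted, new Comparator<Pair<Path, Long>>() {
      public int compare(Pair<Path, Long> a, Pair<Path, Long> b) {
        return b.getSecond().compareTo(a.getSecond());
      }
    });
    List<List<Path>> groups = new ArrayList<List<Path>>(ngroups);
    for (int i = 0; i < ngroups; i++) {
      groups.add(new ArrayList<Path>());
    }
    // deal the files out back and forth: group 0, 1, ..., n-1, n-1, ..., 1, 0, 0, 1, ...
    int g = 0;
    int dir = 1;
    for (Pair<Path, Long> file : sorted) {
      groups.get(g).add(file.getFirst());
      if (g + dir < 0 || g + dir >= ngroups) {
        dir = -dir; // bounce: stay on the same group once at either end
      } else {
        g += dir;
      }
    }
    return groups;
  }
}

Fed the 21 files of sizes 0..20 from the test with ngroups = 5, this produces exactly the
five groups listed in the assertions, each summing to 42.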

Added: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestReferenceRegionHFilesTask.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.ReferenceRegionHFilesTask;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+@Category(SmallTests.class)
+public class TestReferenceRegionHFilesTask {
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @Test
+  public void testRun() throws IOException {
+    FileSystem fs = UTIL.getTestFileSystem();
+    // setup the region internals
+    Path testdir = UTIL.getDataTestDir();
+    Path regionDir = new Path(testdir, "region");
+    Path family1 = new Path(regionDir, "fam1");
+    // make an empty family
+    Path family2 = new Path(regionDir, "fam2");
+    fs.mkdirs(family2);
+
+    // add some files to family 1
+    Path file1 = new Path(family1, "05f99689ae254693836613d1884c6b63");
+    fs.createNewFile(file1);
+    Path file2 = new Path(family1, "7ac9898bf41d445aa0003e3d699d5d26");
+    fs.createNewFile(file2);
+
+    // create the snapshot directory
+    Path snapshotRegionDir = new Path(testdir, HConstants.SNAPSHOT_DIR_NAME);
+    fs.mkdirs(snapshotRegionDir);
+
+    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("name")
+        .setTable("table").build();
+    ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
+    ReferenceRegionHFilesTask task = new ReferenceRegionHFilesTask(snapshot, monitor, regionDir,
+        fs, snapshotRegionDir);
+    ReferenceRegionHFilesTask taskSpy = Mockito.spy(task);
+    taskSpy.call();
+
+    // make sure we never get an error
+    Mockito.verify(taskSpy, Mockito.never()).snapshotFailure(Mockito.anyString(),
+        Mockito.any(Exception.class));
+
+    // verify that all the hfiles get referenced
+    List<String> hfiles = new ArrayList<String>(2);
+    FileStatus[] regions = FSUtils.listStatus(fs, snapshotRegionDir);
+    for (FileStatus region : regions) {
+      FileStatus[] fams = FSUtils.listStatus(fs, region.getPath());
+      for (FileStatus fam : fams) {
+        FileStatus[] files = FSUtils.listStatus(fs, fam.getPath());
+        for (FileStatus file : files) {
+          hfiles.add(file.getPath().getName());
+        }
+      }
+    }
+    assertTrue("Didn't reference :" + file1, hfiles.contains(file1.getName()));
+    assertTrue("Didn't reference :" + file1, hfiles.contains(file2.getName()));
+  }
+}
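
The verification loop above walks snapshotRegionDir as region, then family, then hfile, and
only checks that each original hfile's name shows up somewhere under the snapshot directory.
Here is a sketch of one way to produce that layout, mirroring each hfile as an empty,
same-named marker file; the directory layout and the empty-marker form are assumptions taken
from what the test inspects, not the actual reference format used by
ReferenceRegionHFilesTask.

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class ReferenceHFilesSketch {
  /** Mirror every hfile under regionDir as an empty marker file under snapshotDir. */
  static void referenceHFiles(FileSystem fs, Path regionDir, Path snapshotDir)
      throws IOException {
    // snapshotDir/<region>/<family>/<hfile>, matching the walk in testRun() above
    Path snapshotRegionDir = new Path(snapshotDir, regionDir.getName());
    FileStatus[] families = FSUtils.listStatus(fs, regionDir);
    if (families == null) return;
    for (FileStatus family : families) {
      if (!family.isDir()) continue;                 // only family directories matter
      FileStatus[] hfiles = FSUtils.listStatus(fs, family.getPath());
      if (hfiles == null) continue;                  // an empty family adds no references
      Path snapshotFamilyDir = new Path(snapshotRegionDir, family.getPath().getName());
      fs.mkdirs(snapshotFamilyDir);
      for (FileStatus hfile : hfiles) {
        // an empty file with the same name is all the test's name check requires
        fs.createNewFile(new Path(snapshotFamilyDir, hfile.getPath().getName()));
      }
    }
  }
}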

Added: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotLogSplitter.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.TreeMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.snapshot.SnapshotLogSplitter;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.*;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test snapshot log splitter
+ */
+@Category(SmallTests.class)
+public class TestSnapshotLogSplitter {
+  final Log LOG = LogFactory.getLog(getClass());
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private byte[] TEST_QUALIFIER = Bytes.toBytes("q");
+  private byte[] TEST_FAMILY = Bytes.toBytes("f");
+
+  private Configuration conf;
+  private FileSystem fs;
+  private Path logFile;
+
+  @Before
+  public void setup() throws Exception {
+    conf = TEST_UTIL.getConfiguration();
+    fs = FileSystem.get(conf);
+    logFile = new Path(TEST_UTIL.getDataTestDir(), "test.log");
+    writeTestLog(logFile);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fs.delete(logFile, false);
+  }
+
+  @Test
+  public void testSplitLogs() throws IOException {
+    Map<byte[], byte[]> regionsMap = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+    splitTestLogs(getTableName(5), regionsMap);
+  }
+
+  @Test
+  public void testSplitLogsOnDifferentTable() throws IOException {
+    byte[] tableName = getTableName(1);
+    Map<byte[], byte[]> regionsMap = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
+    for (int j = 0; j < 10; ++j) {
+      byte[] regionName = getRegionName(tableName, j);
+      byte[] newRegionName = getNewRegionName(tableName, j);
+      regionsMap.put(regionName, newRegionName);
+    }
+    splitTestLogs(tableName, regionsMap);
+  }
+
+  /*
+   * Split and verify test logs for the specified table
+   */
+  private void splitTestLogs(final byte[] tableName, final Map<byte[], byte[]> regionsMap)
+      throws IOException {
+    Path tableDir = new Path(TEST_UTIL.getDataTestDir(), Bytes.toString(tableName));
+    SnapshotLogSplitter logSplitter = new SnapshotLogSplitter(conf, fs, tableDir,
+      tableName, regionsMap);
+    try {
+      logSplitter.splitLog(logFile);
+    } finally {
+      logSplitter.close();
+    }
+    verifyRecoverEdits(tableDir, tableName, regionsMap);
+  }
+
+  /*
+   * Verify that every log in the table directory contains only the specified table and regions.
+   */
+  private void verifyRecoverEdits(final Path tableDir, final byte[] tableName,
+      final Map<byte[], byte[]> regionsMap) throws IOException {
+    for (FileStatus regionStatus: FSUtils.listStatus(fs, tableDir)) {
+      assertTrue(regionStatus.getPath().getName().startsWith(Bytes.toString(tableName)));
+      Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionStatus.getPath());
+      byte[] regionName = Bytes.toBytes(regionStatus.getPath().getName());
+      assertFalse(regionsMap.containsKey(regionName));
+      for (FileStatus logStatus: FSUtils.listStatus(fs, regionEdits)) {
+        HLog.Reader reader = HLogFactory.createReader(fs, logStatus.getPath(), conf);
+        try {
+          HLog.Entry entry;
+          while ((entry = reader.next()) != null) {
+            HLogKey key = entry.getKey();
+            assertArrayEquals(tableName, key.getTablename());
+            assertArrayEquals(regionName, key.getEncodedRegionName());
+          }
+        } finally {
+          reader.close();
+        }
+      }
+    }
+  }
+
+  /*
+   * Write some entries to the log file:
+   * 7 different tables named "testtb-%d",
+   * 10 regions per table named "tableName-region-%d",
+   * 50 entries per region with row key "row-%d".
+   */
+  private void writeTestLog(final Path logFile) throws IOException {
+    fs.mkdirs(logFile.getParent());
+    HLog.Writer writer = HLogFactory.createWriter(fs, logFile, conf);
+    try {
+      for (int i = 0; i < 7; ++i) {
+        byte[] tableName = getTableName(i);
+        for (int j = 0; j < 10; ++j) {
+          byte[] regionName = getRegionName(tableName, j);
+          for (int k = 0; k < 50; ++k) {
+            byte[] rowkey = Bytes.toBytes("row-" + k);
+            HLogKey key = new HLogKey(regionName, tableName, (long)k,
+              System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
+            WALEdit edit = new WALEdit();
+            edit.add(new KeyValue(rowkey, TEST_FAMILY, TEST_QUALIFIER, rowkey));
+            writer.append(new HLog.Entry(key, edit));
+          }
+        }
+      }
+    } finally {
+      writer.close();
+    }
+  }
+
+  private byte[] getTableName(int tableId) {
+    return Bytes.toBytes("testtb-" + tableId);
+  }
+
+  private byte[] getRegionName(final byte[] tableName, int regionId) {
+    return Bytes.toBytes(Bytes.toString(tableName) + "-region-" + regionId);
+  }
+
+  private byte[] getNewRegionName(final byte[] tableName, int regionId) {
+    return Bytes.toBytes(Bytes.toString(tableName) + "-new-region-" + regionId);
+  }
+}
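
verifyRecoverEdits() above spells out the contract: after splitting, each region directory
under the table dir holds recovered.edits files whose entries carry only that table and that
(possibly renamed) region. The following is a compact sketch of such a split loop, built
only from the HLog reader/writer calls already used in this test; the edits file name, the
simplified sequence numbering, and the key rewrite are assumptions for illustration, not the
real SnapshotLogSplitter.

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.Bytes;

public class LogSplitSketch {
  /** Split one combined log into per-region recovered.edits files under tableDir. */
  static void split(Configuration conf, FileSystem fs, Path tableDir, byte[] tableName,
      Map<byte[], byte[]> regionsMap, Path logFile) throws IOException {
    Map<byte[], HLog.Writer> writers = new TreeMap<byte[], HLog.Writer>(Bytes.BYTES_COMPARATOR);
    HLog.Reader reader = HLogFactory.createReader(fs, logFile, conf);
    long seqId = 0; // simplified, monotonically increasing sequence id for the sketch
    try {
      HLog.Entry entry;
      while ((entry = reader.next()) != null) {
        HLogKey key = entry.getKey();
        // drop entries that belong to other tables
        if (!Bytes.equals(tableName, key.getTablename())) continue;
        // remap the region name when the caller asked for a rename
        byte[] region = key.getEncodedRegionName();
        byte[] renamed = regionsMap.get(region);
        if (renamed != null) region = renamed;
        HLog.Writer out = writers.get(region);
        if (out == null) {
          Path editsDir = HLogUtil.getRegionDirRecoveredEditsDir(
              new Path(tableDir, Bytes.toString(region)));
          fs.mkdirs(editsDir);
          out = HLogFactory.createWriter(fs, new Path(editsDir, "0000000000000000001"), conf);
          writers.put(region, out);
        }
        // rewrite the key so it names the (possibly renamed) region, as the test verifies
        HLogKey outKey = new HLogKey(region, tableName, seqId++,
            System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
        out.append(new HLog.Entry(outKey, entry.getEdit()));
      }
    } finally {
      reader.close();
      for (HLog.Writer out : writers.values()) {
        out.close();
      }
    }
  }
}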

Added: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotTask.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.errorhandling.ForeignException;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.SnapshotTask;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+@Category(SmallTests.class)
+public class TestSnapshotTask {
+
+  /**
+   * Check that errors from running the task get propagated back to the error listener.
+   */
+  @Test
+  public void testErrorPropagation() throws Exception {
+    ForeignExceptionDispatcher error = mock(ForeignExceptionDispatcher.class);
+    SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot")
+        .setTable("table").build();
+    final Exception thrown = new Exception("Failed!");
+    SnapshotTask fail = new SnapshotTask(snapshot, error) {
+      @Override
+      public Void call() {
+        snapshotFailure("Injected failure", thrown);
+        return null;
+      }
+    };
+    fail.call();
+
+    verify(error, Mockito.times(1)).receive(any(ForeignException.class));
+  }
+
+}

Added: hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestWALReferenceTask.java
URL: http://svn.apache.org/viewvc/hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestWALReferenceTask.java?rev=1445814&view=auto
==============================================================================
--- hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestWALReferenceTask.java (added)
+++ hbase/branches/hbase-7290/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestWALReferenceTask.java Wed Feb 13 18:37:32 2013
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.snapshot.ReferenceServerWALsTask;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+/**
+ * Test that the WAL reference task works as expected
+ */
+@Category(SmallTests.class)
+public class TestWALReferenceTask {
+
+  private static final Log LOG = LogFactory.getLog(TestWALReferenceTask.class);
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @Test
+  public void testRun() throws IOException {
+    Configuration conf = UTIL.getConfiguration();
+    FileSystem fs = UTIL.getTestFileSystem();
+    // setup the log dir
+    Path testDir = UTIL.getDataTestDir();
+    Set<String> servers = new HashSet<String>();
+    Path logDir = new Path(testDir, ".logs");
+    Path server1Dir = new Path(logDir, "Server1");
+    servers.add(server1Dir.getName());
+    Path server2Dir = new Path(logDir, "me.hbase.com,56073,1348618509968");
+    servers.add(server2Dir.getName());
+    // logs under server 1
+    Path log1_1 = new Path(server1Dir, "me.hbase.com%2C56073%2C1348618509968.1348618520536");
+    Path log1_2 = new Path(server1Dir, "me.hbase.com%2C56073%2C1348618509968.1234567890123");
+    // logs under server 2
+    Path log2_1 = new Path(server2Dir, "me.hbase.com%2C56074%2C1348618509998.1348618515589");
+    Path log2_2 = new Path(server2Dir, "me.hbase.com%2C56073%2C1348618509968.1234567890123");
+
+    // create all the log files
+    fs.createNewFile(log1_1);
+    fs.createNewFile(log1_2);
+    fs.createNewFile(log2_1);
+    fs.createNewFile(log2_2);
+
+    FSUtils.logFileSystemState(fs, testDir, LOG);
+    FSUtils.setRootDir(conf, testDir);
+    SnapshotDescription snapshot = SnapshotDescription.newBuilder()
+        .setName("testWALReferenceSnapshot").build();
+    ForeignExceptionDispatcher listener = Mockito.mock(ForeignExceptionDispatcher.class);
+
+    // reference all the files in the first server directory
+    ReferenceServerWALsTask task = new ReferenceServerWALsTask(snapshot, listener, server1Dir,
+        conf, fs);
+    task.call();
+
+    // reference all the files in the second server directory
+    task = new ReferenceServerWALsTask(snapshot, listener, server2Dir, conf, fs);
+    task.call();
+
+    // verify that we got everything
+    FSUtils.logFileSystemState(fs, testDir, LOG);
+    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, testDir);
+    Path snapshotLogDir = new Path(workingDir, HConstants.HREGION_LOGDIR_NAME);
+
+    // make sure we referenced all the WAL files
+    TakeSnapshotUtils.verifyAllLogsGotReferenced(fs, logDir, servers, snapshot, snapshotLogDir);
+
+    // make sure the task checked for errors, and that no error was ever received
+    Mockito.verify(listener, Mockito.atLeastOnce()).rethrowException();
+    Mockito.verifyNoMoreInteractions(listener);
+  }
+}
\ No newline at end of file