You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2012/09/01 02:36:06 UTC

svn commit: r1379682 - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java

Author: stack
Date: Sat Sep  1 00:36:06 2012
New Revision: 1379682

URL: http://svn.apache.org/viewvc?rev=1379682&view=rev
Log:
HBASE-6667 TestCatalogJanitor occasionally fails; PATCH THAT ADDS DEBUG AROUND FAILING TEST

Modified:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java?rev=1379682&r1=1379681&r2=1379682&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java Sat Sep  1 00:36:06 2012
@@ -245,6 +245,7 @@ class CatalogJanitor extends Chore {
         this.services.getAssignmentManager().regionOffline(parent);
       }
       FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
+      LOG.debug("Archiving parent region:" + parent);
       HFileArchiver.archiveRegion(fs, parent);
       MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
       result = true;

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1379682&r1=1379681&r2=1379682&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Sat Sep  1 00:36:06 2012
@@ -32,6 +32,8 @@ import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -76,6 +78,8 @@ import com.google.protobuf.ServiceExcept
 
 @Category(SmallTests.class)
 public class TestCatalogJanitor {
+  private static final Log LOG = LogFactory.getLog(TestCatalogJanitor.class);
+
   /**
    * Pseudo server for below tests.
    * Be sure to call stop on the way out else could leave some mess around.
@@ -529,6 +533,10 @@ public class TestCatalogJanitor {
     janitor.join();
   }
 
+  /**
+   * Test that we correctly archive all the storefiles when a region is deleted
+   * @throws Exception
+   */
   @Test
   public void testArchiveOldRegion() throws Exception {
     String table = "table";
@@ -546,10 +554,10 @@ public class TestCatalogJanitor {
     HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
     HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
     HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
+
     // Test that when both daughter regions are in place, that we do not
     // remove the parent.
-    Result r = createResult(parent, splita, splitb);
-
+    Result parentMetaRow = createResult(parent, splita, splitb);
     FileSystem fs = FileSystem.get(htu.getConfiguration());
     Path rootdir = services.getMasterFileSystem().getRootDir();
     // have to set the root directory since we use it in HFileDisposer to figure out to get to the
@@ -559,33 +567,54 @@ public class TestCatalogJanitor {
     Path tabledir = HTableDescriptor.getTableDir(rootdir, htd.getName());
     Path storedir = HStore.getStoreHomedir(tabledir, parent.getEncodedName(),
       htd.getColumnFamilies()[0].getName());
-
-    // delete the file and ensure that the files have been archived
     Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
       tabledir, htd.getColumnFamilies()[0].getName());
+    LOG.debug("Table dir:" + tabledir);
+    LOG.debug("Store dir:" + storedir);
+    LOG.debug("Store archive dir:" + storeArchive);
 
-    // enable archiving, make sure that files get archived
-    addMockStoreFiles(2, services, storedir);
+    // add a couple of store files that we can check for
+    FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
     // get the current store files for comparison
     FileStatus[] storeFiles = fs.listStatus(storedir);
+    int index = 0;
     for (FileStatus file : storeFiles) {
-      System.out.println("Have store file:" + file.getPath());
+      LOG.debug("Have store file:" + file.getPath());
+      assertEquals("Got unexpected store file", mockFiles[index].getPath(),
+        storeFiles[index].getPath());
+      index++;
     }
 
     // do the cleaning of the parent
-    assertTrue(janitor.cleanParent(parent, r));
+    assertTrue(janitor.cleanParent(parent, parentMetaRow));
+    LOG.debug("Finished cleanup of parent region");
 
     // and now check to make sure that the files have actually been archived
     FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
+    logFiles("original store files", storeFiles);
+    logFiles("archived files", archivedStoreFiles);
+
     assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);
 
     // cleanup
+    FSUtils.delete(fs, rootdir, true);
     services.stop("Test finished");
-    server.stop("shutdown");
+    server.stop("Test finished");
     janitor.join();
   }
 
   /**
+   * @param description description of the files for logging
+   * @param storeFiles the status of the files to log
+   */
+  private void logFiles(String description, FileStatus[] storeFiles) {
+    LOG.debug("Current " + description + ": ");
+    for (FileStatus file : storeFiles) {
+      LOG.debug(file.getPath());
+    }
+  }
+
+  /**
    * Test that if a store file with the same name is present as those already backed up cause the
    * already archived files to be timestamped backup
    */
@@ -657,7 +686,7 @@ public class TestCatalogJanitor {
     janitor.join();
   }
 
-  private void addMockStoreFiles(int count, MasterServices services, Path storedir)
+  private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
       throws IOException {
     // get the existing store files
     FileSystem fs = services.getMasterFileSystem().getFileSystem();
@@ -669,9 +698,11 @@ public class TestCatalogJanitor {
       dos.writeBytes("Some data: " + i);
       dos.close();
     }
+    LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
     // make sure the mock store files are there
     FileStatus[] storeFiles = fs.listStatus(storedir);
-    assertEquals(count, storeFiles.length);
+    assertEquals("Didn't have expected store files", count, storeFiles.length);
+    return storeFiles;
   }
 
   private String setRootDirAndCleanIt(final HBaseTestingUtility htu,

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java?rev=1379682&r1=1379681&r2=1379682&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java Sat Sep  1 00:36:06 2012
@@ -85,29 +85,29 @@ public class HFileArchiveTestingUtil {
 
   /**
    * Compare the archived files to the files in the original directory
-   * @param previous original files that should have been archived
-   * @param archived files that were archived
+   * @param expected original files that should have been archived
+   * @param actual files that were archived
   * @param fs filesystem on which the archiving took place
    * @throws IOException
    */
-  public static void assertArchiveEqualToOriginal(FileStatus[] previous, FileStatus[] archived,
+  public static void assertArchiveEqualToOriginal(FileStatus[] expected, FileStatus[] actual,
       FileSystem fs) throws IOException {
-    assertArchiveEqualToOriginal(previous, archived, fs, false);
+    assertArchiveEqualToOriginal(expected, actual, fs, false);
   }
 
   /**
    * Compare the archived files to the files in the original directory
-   * @param previous original files that should have been archived
-   * @param archived files that were archived
+   * @param expected original files that should have been archived
+   * @param actual files that were archived
    * @param fs {@link FileSystem} on which the archiving took place
    * @param hasTimedBackup <tt>true</tt> if we expect to find an archive backup directory with a
    *          copy of the files in the archive directory (and the original files).
    * @throws IOException
    */
-  public static void assertArchiveEqualToOriginal(FileStatus[] previous, FileStatus[] archived,
+  public static void assertArchiveEqualToOriginal(FileStatus[] expected, FileStatus[] actual,
       FileSystem fs, boolean hasTimedBackup) throws IOException {
 
-    List<List<String>> lists = getFileLists(previous, archived);
+    List<List<String>> lists = getFileLists(expected, actual);
     List<String> original = lists.get(0);
     Collections.sort(original);