You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by te...@apache.org on 2012/11/21 21:19:33 UTC

svn commit: r1412267 - in /hbase/trunk/hbase-server/src: main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java

Author: tedyu
Date: Wed Nov 21 20:19:32 2012
New Revision: 1412267

URL: http://svn.apache.org/viewvc?rev=1412267&view=rev
Log:
HBASE-7202 Family Store Files are not archived on admin.deleteColumn() (Matteo)


Modified:
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java?rev=1412267&r1=1412266&r2=1412267&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java Wed Nov 21 20:19:32 2012
@@ -147,6 +147,38 @@ public class HFileArchiver {
   }
 
   /**
+   * Remove from the specified region the store files of the specified column family,
+   * either by archiving them or outright deletion
+   * @param fs the filesystem where the store files live
+   * @param conf {@link Configuration} to examine to determine the archive directory
+   * @param parent Parent region hosting the store files
+   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
+   * @param family the family hosting the store files
+   * @throws IOException if the files could not be correctly disposed.
+   */
+  public static void archiveFamily(FileSystem fs, Configuration conf,
+      HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
+    Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
+    FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
+    if (storeFiles == null) {
+      LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
+          ", family=" + Bytes.toString(family));
+      return;
+    }
+
+    FileStatusConverter getAsFile = new FileStatusConverter(fs);
+    Collection<File> toArchive = Lists.transform(Arrays.asList(storeFiles), getAsFile);
+    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family);
+
+    // do the actual archive
+    if (!resolveAndArchive(fs, storeArchiveDir, toArchive)) {
+      throw new IOException("Failed to archive/delete all the files for region:"
+          + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
+          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
+    }
+  }
+
+  /**
    * Remove the store files, either by archiving them or outright deletion
    * @param fs the filesystem where the store files live
    * @param parent Parent region hosting the store files
@@ -196,7 +228,7 @@ public class HFileArchiver {
     if (!resolveAndArchive(fs, storeArchiveDir, storeFiles)) {
       throw new IOException("Failed to archive/delete all the files for region:"
           + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
-          + " into " + storeArchiveDir + "Something is probably arwy on the filesystem.");
+          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
     }
   }
 

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java?rev=1412267&r1=1412266&r2=1412267&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java Wed Nov 21 20:19:32 2012
@@ -458,10 +458,14 @@ public class MasterFileSystem {
 
   public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
       throws IOException {
-    Path delDir = new Path(rootdir,
-        new Path(region.getTableNameAsString(), new Path(
-            region.getEncodedName(), new Path(Bytes.toString(familyName)))));
-    if (fs.delete(delDir, true) == false) {
+    // archive family store files
+    Path tableDir = new Path(rootdir, region.getTableNameAsString());
+    HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
+
+    // delete the family folder
+    Path familyDir = new Path(tableDir,
+      new Path(region.getEncodedName(), Bytes.toString(familyName)));
+    if (fs.delete(familyDir, true) == false) {
       throw new IOException("Could not delete family "
           + Bytes.toString(familyName) + " from FileSystem for region "
           + region.getRegionNameAsString() + "(" + region.getEncodedName()

Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java?rev=1412267&r1=1412266&r2=1412267&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java Wed Nov 21 20:19:32 2012
@@ -230,18 +230,70 @@ public class TestHFileArchiving {
 
     // then get the current store files
     Path regionDir = region.getRegionDir();
-    List<String> storeFiles = getAllFileNames(fs, regionDir);
-    // remove all the non-storefile named files for the region
+    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
+
+    // then delete the table so the hfiles get archived
+    UTIL.deleteTable(TABLE_NAME);
+
+    // then get the files in the archive directory.
+    Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration());
+    List<String> archivedFiles = getAllFileNames(fs, archiveDir);
+    Collections.sort(storeFiles);
+    Collections.sort(archivedFiles);
+
+    LOG.debug("Store files:");
     for (int i = 0; i < storeFiles.size(); i++) {
-      String file = storeFiles.get(i);
-      if (file.contains(HRegion.REGIONINFO_FILE) || file.contains("hlog")) {
-        storeFiles.remove(i--);
-      }
+      LOG.debug(i + " - " + storeFiles.get(i));
+    }
+    LOG.debug("Archive files:");
+    for (int i = 0; i < archivedFiles.size(); i++) {
+      LOG.debug(i + " - " + archivedFiles.get(i));
     }
-    storeFiles.remove(HRegion.REGIONINFO_FILE);
+
+    assertTrue("Archived files are missing some of the store files!",
+      archivedFiles.containsAll(storeFiles));
+  }
+
+  /**
+   * Test that the store files are archived when a column family is removed.
+   * @throws Exception
+   */
+  @Test
+  public void testArchiveOnTableFamilyDelete() throws Exception {
+    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
+    // make sure we only have 1 region serving this table
+    assertEquals(1, servingRegions.size());
+    HRegion region = servingRegions.get(0);
+
+    // get the parent RS and monitor
+    HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME);
+    FileSystem fs = hrs.getFileSystem();
+
+    // put some data on the region
+    LOG.debug("-------Loading table");
+    UTIL.loadRegion(region, TEST_FAM);
+
+    // get the hfiles in the region
+    List<HRegion> regions = hrs.getOnlineRegions(TABLE_NAME);
+    assertEquals("More than 1 region for test table.", 1, regions.size());
+
+    region = regions.get(0);
+    // wait for all the compactions to complete
+    region.waitForFlushesAndCompactions();
+
+    // disable table to prevent new updates
+    UTIL.getHBaseAdmin().disableTable(TABLE_NAME);
+    LOG.debug("Disabled table");
+
+    // remove all the files from the archive to get a fair comparison
+    clearArchiveDirectory();
+
+    // then get the current store files
+    Path regionDir = region.getRegionDir();
+    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
 
    // then delete the column family so the hfiles get archived
-    UTIL.deleteTable(TABLE_NAME);
+    UTIL.getHBaseAdmin().deleteColumn(TABLE_NAME, TEST_FAM);
 
     // then get the files in the archive directory.
     Path archiveDir = HFileArchiveUtil.getArchivePath(UTIL.getConfiguration());
@@ -290,4 +342,18 @@ public class TestHFileArchiving {
     }
     return fileNames;
   }
+
+  private List<String> getRegionStoreFiles(final FileSystem fs, final Path regionDir) 
+      throws IOException {
+    List<String> storeFiles = getAllFileNames(fs, regionDir);
+    // remove all the non-storefile named files for the region
+    for (int i = 0; i < storeFiles.size(); i++) {
+      String file = storeFiles.get(i);
+      if (file.contains(HRegion.REGIONINFO_FILE) || file.contains("hlog")) {
+        storeFiles.remove(i--);
+      }
+    }
+    storeFiles.remove(HRegion.REGIONINFO_FILE);
+    return storeFiles;
+  }
 }