Posted to commits@hbase.apache.org by st...@apache.org on 2009/07/15 07:38:41 UTC

svn commit: r794140 - in /hadoop/hbase/trunk/src: java/org/apache/hadoop/hbase/ java/org/apache/hadoop/hbase/util/ test/data/ test/org/apache/hadoop/hbase/util/ testdata/

Author: stack
Date: Wed Jul 15 05:38:40 2009
New Revision: 794140

URL: http://svn.apache.org/viewvc?rev=794140&view=rev
Log:
HBASE-1215 [migration] 0.19.0 -> 0.20.0 migration (hfile, HCD changes, HSK changes) -- part 1

Added:
    hadoop/hbase/trunk/src/test/data/
    hadoop/hbase/trunk/src/test/data/hbase-0.19-two-small-tables.zip   (with props)
Removed:
    hadoop/hbase/trunk/src/testdata/
Modified:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java?rev=794140&r1=794139&r2=794140&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConstants.java Wed Jul 15 05:38:40 2009
@@ -47,8 +47,9 @@
    * Version 4 supports only one kind of bloom filter.
    * Version 5 changes versions in catalog table regions.
    * Version 6 enables blockcaching on catalog tables.
+   * Version 7 introduces hfile -- hbase 0.19 to 0.20.
    */
-  public static final String FILE_SYSTEM_VERSION = "6";
+  public static final String FILE_SYSTEM_VERSION = "7";
   
   // Configuration parameters
   

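For context, HBase compares this constant against the contents of the hbase.version
file under hbase.rootdir at startup, so bumping it to "7" is what pushes 0.19-era
installs through the migration path. A minimal sketch of such a gate, assuming
FSUtils.getVersion(fs, rootdir) reads that file (treat the exact signature as
illustrative; the real check lives in FSUtils/Migrate):

    // Sketch only: compare the stored filesystem version against the constant.
    String version = FSUtils.getVersion(fs, rootdir); // contents of hbase.version
    if (!HConstants.FILE_SYSTEM_VERSION.equals(version)) {
      // A version 6 (0.19) layout must be migrated before 0.20 will start.
      throw new IOException("File system needs migration from version " +
        version + " to " + HConstants.FILE_SYSTEM_VERSION +
        ". Run the migration tool first.");
    }
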
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=794140&r1=794139&r2=794140&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/FSUtils.java Wed Jul 15 05:38:40 2009
@@ -29,8 +29,10 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -265,4 +267,35 @@
       HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
     return fs.exists(rootRegionDir);
   }
+
+  /**
+   * Runs through the hbase rootdir and checks all stores have only
+   * one file in them -- that is, they've been major compacted.  Looks
+   * at root and meta tables too.
+   * @param fs
+   * @param c
+   * @return True if this hbase install is major compacted.
+   * @throws IOException
+   */
+  public static boolean isMajorCompacted(final FileSystem fs,
+      final HBaseConfiguration c)
+  throws IOException {
+    // Filter that accepts directories only.
+    final PathFilter dirFilter = new PathFilter() {
+      public boolean accept(Path p) {
+        try {
+          return fs.getFileStatus(p).isDir();
+        } catch (IOException e) {
+          e.printStackTrace();
+          return false;
+        }
+      }
+    };
+    // Presumes any directory under hbase.rootdir is a table.
+    FileStatus [] tableDirs =
+      fs.listStatus(new Path(c.get(HConstants.HBASE_DIR)), dirFilter);
+    for (int i = 0; i < tableDirs.length; i++) {
+      // Every directory under a table is a region.
+      FileStatus [] regionDirs =
+        fs.listStatus(tableDirs[i].getPath(), dirFilter);
+      for (int j = 0; j < regionDirs.length; j++) {
+        // Every directory under a region is a family store.
+        FileStatus [] familyDirs =
+          fs.listStatus(regionDirs[j].getPath(), dirFilter);
+        for (int k = 0; k < familyDirs.length; k++) {
+          // A store that has been major compacted holds at most one file.
+          if (fs.listStatus(familyDirs[k].getPath()).length > 1) {
+            return false;
+          }
+        }
+      }
+    }
+    return true;
+  }
 }
\ No newline at end of file

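The new FSUtils.isMajorCompacted helper is intended as a pre-flight check for the
migration. Roughly how a caller might use it (a sketch; assumes hbase.rootdir is
set in the configuration):

    // Sketch only: gate the 0.19 -> 0.20 migration on the compaction check.
    HBaseConfiguration conf = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(conf);
    if (!FSUtils.isMajorCompacted(fs, conf)) {
      // Operator must major compact every table, including -ROOT- and
      // .META., before attempting the migration.
      System.err.println("Major compact all tables, then rerun the migration");
    }
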
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=794140&r1=794139&r2=794140&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java Wed Jul 15 05:38:40 2009
@@ -26,8 +26,10 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -86,7 +88,7 @@
   private static final float HBASE_0_1_VERSION = 0.1f;
   
   // Filesystem version we can migrate from
-  private static final int PREVIOUS_VERSION = 4;
+  private static final int PREVIOUS_VERSION = 6;
   
   private static final String MIGRATION_LINK = 
     " See http://wiki.apache.org/hadoop/Hbase/HowToMigrate for more information.";
@@ -185,14 +187,14 @@
       if (version == HBASE_0_1_VERSION ||
           Integer.valueOf(versionStr).intValue() < PREVIOUS_VERSION) {
         String msg = "Cannot upgrade from " + versionStr + " to " +
-        HConstants.FILE_SYSTEM_VERSION + " you must install hbase-0.2.x, run " +
+        HConstants.FILE_SYSTEM_VERSION + "; you must install an earlier hbase, run " +
         "the upgrade tool, reinstall this version and run this utility again." +
         MIGRATION_LINK;
         System.out.println(msg);
         throw new IOException(msg);
       }
 
-      migrate4To6();
+      migrate6To7();
 
       if (!readOnly) {
         // Set file system version
@@ -209,17 +211,31 @@
     }
   }
   
-  // Move the fileystem version from 4 to 6.
-  // In here we rewrite the catalog table regions so they keep 10 versions
-  // instead of 1.
-  private void migrate4To6() throws IOException {
+  // Move the filesystem version from 6 to 7.
+  private void migrate6To7() throws IOException {
     if (this.readOnly && this.migrationNeeded) {
       return;
     }
+    // Before we start, make sure every store has been major compacted.
+    if (!isMajorCompacted(FileSystem.get(this.conf), this.conf)) {
+      String msg = "All tables must be major compacted before the migration can begin." +
+        MIGRATION_LINK;
+      System.out.println(msg);
+      throw new IOException(msg);
+    }
     final MetaUtils utils = new MetaUtils(this.conf);
     try {
-      // These two operations are effectively useless.  -ROOT- is hardcode,
-      // at least until hbase 0.20.0 when we store it out in ZK.
+      // Preparation steps still to do:
+      // TODO: Set the .META. and -ROOT- to flush at 16k?  32k?
+      // TODO: Enable block cache on all tables.
+      // TODO: Rewrite MEMCACHE_FLUSHSIZE as MEMSTORE_FLUSHSIZE -- the name has changed.
+      // TODO: Remove the tableindexer 'index' attribute from TableDescriptor (see HBASE-1586).
+      // TODO: Move the in-memory parameter from table to column family (from HTD to HCD).
+      // TODO: Purge isInMemory, etc., methods from HTD as part of migration.
+      // TODO: Clean up old region log files (HBASE-698).
+
       updateVersions(utils.getRootRegion().getRegionInfo());
       enableBlockCache(utils.getRootRegion().getRegionInfo());
       // Scan the root region
@@ -235,11 +251,52 @@
           return true;
         }
       });
+      LOG.info("TODO: Note on make sure not using old hbase-default.xml");
+      /*
+       * hbase.master / hbase.master.hostname are obsolete, that's replaced by
+hbase.cluster.distributed. This config must be set to "true" to have a
+fully-distributed cluster and the server lines in zoo.cfg must not
+point to "localhost".
+
+The clients must have a valid zoo.cfg in their classpath since we
+don't provide the master address.
+
+hbase.master.dns.interface and hbase.master.dns.nameserver should be
+set to control the master's address (not mandatory).
+       */
+      LOG.info("TODO: Note on zookeeper config. before starting:");
     } finally {
       utils.shutdown();
     }
   }
 
+  /**
+   * Runs through the hbase rootdir and checks all stores have only
+   * one file in them -- that is, they've been major compacted.  Looks
+   * at root and meta tables too.  Delegates to the shared check in
+   * FSUtils (same package, so no import is needed).
+   * @param fs
+   * @param c
+   * @return True if this hbase install is major compacted.
+   * @throws IOException
+   */
+  public static boolean isMajorCompacted(final FileSystem fs,
+      final HBaseConfiguration c)
+  throws IOException {
+    return FSUtils.isMajorCompacted(fs, c);
+  }
+
   /*
    * Enable blockcaching on catalog tables.
    * @param mr

Added: hadoop/hbase/trunk/src/test/data/hbase-0.19-two-small-tables.zip
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/data/hbase-0.19-two-small-tables.zip?rev=794140&view=auto
==============================================================================
Binary file - no diff available.

Propchange: hadoop/hbase/trunk/src/test/data/hbase-0.19-two-small-tables.zip
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Modified: hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java?rev=794140&r1=794139&r2=794140&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java (original)
+++ hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/util/MigrationTest.java Wed Jul 15 05:38:40 2009
@@ -20,14 +20,13 @@
 
 package org.apache.hadoop.hbase.util;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.InputStream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -42,30 +41,32 @@
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.ResultScanner;
 
 /**
- * Runs migration of filesystem from hbase 0.x to 0.x
+ * Runs migration of filesystem from hbase 0.19 to hbase 0.20.
+ * Not part of the general test suite because it takes time.
  */
 public class MigrationTest extends HBaseTestCase {
   private static final Log LOG = LogFactory.getLog(MigrationTest.class);
-  
-  // This is the name of the table that is in the data file.
-  private static final String TABLENAME = "TestUpgrade";
-  
-  // The table has two columns
-  private static final byte [][] TABLENAME_COLUMNS =
-    {Bytes.toBytes("column_a:"), Bytes.toBytes("column_b:")};
 
   // Expected count of rows in migrated table.
-  private static final int EXPECTED_COUNT = 17576;
+  private static final int EXPECTED_COUNT = 3;
 
   /**
-   * Test migration. To be used in future migrations
+   * Test migration.
    * @throws IOException 
    */
-  public void testUpgrade() throws IOException {
+  public void testMigration() throws IOException {
+    Path rootdir = getUnitTestdir(getName());
+    FileSystem fs = FileSystem.get(this.conf);
+    Path hbasedir = loadTestData(fs, rootdir);
+    assertTrue(fs.exists(hbasedir));
+    listPaths(fs, hbasedir, -1);
+    Migrate migrator = new Migrate(this.conf);
+    Path qualified = fs.makeQualified(hbasedir);
+    String uri = qualified.toString();
+    this.conf.set("hbase.rootdir", uri);
+    migrator.run(new String [] {"upgrade"});
   }
   
   /*
@@ -74,24 +75,18 @@
    * @param rootDir
    * @throws IOException
    */
-  private void loadTestData(final FileSystem dfs, final Path rootDir)
+  private Path loadTestData(final FileSystem dfs, final Path rootDir)
   throws IOException {
-    FileSystem localfs = FileSystem.getLocal(conf);
-    // Get path for zip file.  If running this test in eclipse, define
-    // the system property src.testdata for your test run.
-    String srcTestdata = System.getProperty("src.testdata");
-    if (srcTestdata == null) {
-      throw new NullPointerException("Define src.test system property");
-    }
-    Path data = new Path(srcTestdata, "HADOOP-2478-testdata-v0.1.zip");
-    if (!localfs.exists(data)) {
-      throw new FileNotFoundException(data.toString());
-    }
-    FSDataInputStream hs = localfs.open(data);
-    ZipInputStream zip = new ZipInputStream(hs);
-    unzip(zip, dfs, rootDir);
-    zip.close();
-    hs.close();
+    String hbasedir = "hbase-0.19-two-small-tables";
+    InputStream is = this.getClass().getClassLoader().
+      getResourceAsStream("data/" + hbasedir + ".zip");
+    ZipInputStream zip = new ZipInputStream(is);
+    try {
+      unzip(zip, dfs, rootDir);
+    } finally {
+      zip.close();
+    }
+    return new Path(rootDir, hbasedir);
   }
 
   /*
@@ -116,6 +111,7 @@
       assertTrue(hb.isMasterRunning());
       HTableDescriptor [] tables = hb.listTables();
       boolean foundTable = false;
+      /*
       for (int i = 0; i < tables.length; i++) {
         if (Bytes.equals(Bytes.toBytes(TABLENAME), tables[i].getName())) {
           foundTable = true;
@@ -148,6 +144,8 @@
       } finally {
         s.close();
       }
+      
+    */
     } finally {
       HConnectionManager.deleteConnectionInfo(conf, false);
       cluster.shutdown();
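
Once the commented-out verification above is revived, a row count in the 0.20
client API would look roughly like this (a sketch; assumes the Scan and
ResultScanner imports removed in this commit are restored, and that tableName
names one of the two small fixture tables):

    // Sketch only: count rows in a migrated table and compare to the fixture.
    HTable t = new HTable(conf, tableName);
    ResultScanner scanner = t.getScanner(new Scan());
    try {
      int count = 0;
      for (Result r : scanner) {
        count++;
      }
      assertEquals(EXPECTED_COUNT, count); // fixture table holds 3 rows
    } finally {
      scanner.close();
    }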