Posted to commits@hbase.apache.org by ap...@apache.org on 2009/07/24 07:21:01 UTC

svn commit: r797323 [2/2] - in /hadoop/hbase/trunk_on_hadoop-0.18.3: ./ bin/ src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/tableindexed/ src/contrib/transactional/src/java/org/apache/hadoop/hbase/client/transactional/ src/contrib/tr...

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri Jul 24 05:20:59 2009
@@ -1063,6 +1063,10 @@
    */
   public InternalScanner getScanner(Scan scan)
   throws IOException {
+   return getScanner(scan, null);
+  }
+  
+  protected InternalScanner getScanner(Scan scan, List<KeyValueScanner> additionalScanners) throws IOException {
     newScannerLock.readLock().lock();
     try {
       if (this.closed.get()) {
@@ -1078,7 +1082,7 @@
           scan.addFamily(family);
         }
       }
-      return new RegionScanner(scan);
+      return new RegionScanner(scan, additionalScanners);
       
     } finally {
       newScannerLock.readLock().unlock();
@@ -1677,8 +1681,8 @@
   class RegionScanner implements InternalScanner {
     private final KeyValueHeap storeHeap;
     private final byte [] stopRow;
-    
-    RegionScanner(Scan scan) {
+
+    RegionScanner(Scan scan, List<KeyValueScanner> additionalScanners) {
       if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) {
         this.stopRow = null;
       } else {
@@ -1686,6 +1690,9 @@
       }
       
       List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();
+      if (additionalScanners != null) {
+        scanners.addAll(additionalScanners);
+      }
       for (Map.Entry<byte[], NavigableSet<byte[]>> entry : 
           scan.getFamilyMap().entrySet()) {
         Store store = stores.get(entry.getKey());
@@ -1694,6 +1701,10 @@
       this.storeHeap = 
         new KeyValueHeap(scanners.toArray(new KeyValueScanner[0]), comparator);
     }
+    
+    RegionScanner(Scan scan) {
+      this(scan, null);
+    }
 
     /**
      * Get the next row of results from this region.

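The new protected getScanner(Scan, List<KeyValueScanner>) overload lets HRegion
subclasses and same-package helpers splice extra scanners into a region scan. A
minimal usage sketch (hypothetical helper class, not part of this commit):

  package org.apache.hadoop.hbase.regionserver;

  import java.io.IOException;
  import java.util.Collections;
  import java.util.List;

  import org.apache.hadoop.hbase.client.Scan;

  public class AdditionalScannerExample {
    // Being in the regionserver package, this helper can reach the protected
    // overload and merge an extra scanner (e.g. one over uncommitted edits)
    // into the region's store scanners.
    public static InternalScanner scanWith(final HRegion region, final Scan scan,
        final KeyValueScanner extra) throws IOException {
      List<KeyValueScanner> additional = Collections.singletonList(extra);
      return region.getScanner(scan, additional);
    }
  }
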
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Fri Jul 24 05:20:59 2009
@@ -276,19 +276,20 @@
   /**
    * Creates all of the state that needs to be reconstructed in case we are
    * doing a restart. This is shared between the constructor and restart().
+   * Both call it.
    * @throws IOException
    */
   private void reinitialize() throws IOException {
-    abortRequested = false;
-    stopRequested.set(false);
-    shutdownHDFS.set(true);
+    this.abortRequested = false;
+    this.stopRequested.set(false);
+    this.shutdownHDFS.set(true);
 
     // Server to handle client requests
     this.server = HBaseRPC.getServer(this, address.getBindAddress(), 
       address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
       false, conf);
     this.server.setErrorHandler(this);
-    // Address is givin a default IP for the moment. Will be changed after
+    // Address is given a default IP for the moment. Will be changed after
     // calling the master.
     this.serverInfo = new HServerInfo(new HServerAddress(
       new InetSocketAddress(address.getBindAddress(),
@@ -298,11 +299,8 @@
       throw new NullPointerException("Server address cannot be null; " +
         "hbase-958 debugging");
     }
-
     reinitializeThreads();
-
     reinitializeZooKeeper();
-
     int nbBlocks = conf.getInt("hbase.regionserver.nbreservationblocks", 4);
     for(int i = 0; i < nbBlocks; i++)  {
       reservedSpace.add(new byte[DEFAULT_SIZE_RESERVATION_BLOCK]);
@@ -383,11 +381,9 @@
 
   private void restart() {
     LOG.info("Restarting Region Server");
-
     shutdownHDFS.set(false);
     abort();
     Threads.shutdown(regionServerThread);
-
     boolean done = false;
     while (!done) {
       try {
@@ -397,7 +393,6 @@
         LOG.debug("Error trying to reinitialize ZooKeeper", e);
       }
     }
-
     Thread t = new Thread(this);
     String name = regionServerThread.getName();
     t.setName(name);
@@ -713,10 +708,14 @@
       }
       // Master may have sent us a new address with the other configs.
       // Update our address in this case. See HBASE-719
-      if(conf.get("hbase.regionserver.address") != null)
-        serverInfo.setServerAddress(new HServerAddress
-            (conf.get("hbase.regionserver.address"), 
-            serverInfo.getServerAddress().getPort()));
+      String hra = conf.get("hbase.regionserver.address");
+      if (hra != null) {
+        HServerAddress hsa = new HServerAddress(hra,
+          this.serverInfo.getServerAddress().getPort());
+        LOG.info("Master passed us address to use. Was=" +
+          this.serverInfo.getServerAddress() + ", Now=" + hra);
+        this.serverInfo.setServerAddress(hsa);
+      }
       // Master sent us hbase.rootdir to use. Should be fully qualified
       // path with file system specification included.  Set 'fs.default.name'
       // to match the filesystem on hbase.rootdir else underlying hadoop hdfs
@@ -1039,7 +1038,7 @@
   private HLog setupHLog() throws RegionServerRunningException,
     IOException {
     
-    Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(serverInfo));
+    Path logdir = new Path(rootDir, HLog.getHLogDirectoryName(this.serverInfo));
     if (LOG.isDebugEnabled()) {
       LOG.debug("Log dir " + logdir);
     }
@@ -1180,7 +1179,7 @@
           LOG.info("Failed binding http info server to port: " + port);
           port++;
           // update HRS server info
-          serverInfo.setInfoPort(port);
+          this.serverInfo.setInfoPort(port);
         }
       } 
     }
@@ -1200,7 +1199,7 @@
     // a thread.
     this.server.start();
     LOG.info("HRegionServer started at: " +
-        serverInfo.getServerAddress().toString());
+      this.serverInfo.getServerAddress().toString());
   }
 
   /*
@@ -1347,12 +1346,12 @@
         boolean startCodeOk = false; 
         while(!startCodeOk) {
           serverInfo.setStartCode(System.currentTimeMillis());
-          startCodeOk = zooKeeperWrapper.writeRSLocation(serverInfo);
+          startCodeOk = zooKeeperWrapper.writeRSLocation(this.serverInfo);
           if(!startCodeOk) {
            LOG.debug("Start code already taken, trying another one");
           }
         }
-        result = this.hbaseMaster.regionServerStartup(serverInfo);
+        result = this.hbaseMaster.regionServerStartup(this.serverInfo);
         break;
       } catch (Leases.LeaseStillHeldException e) {
         LOG.info("Lease " + e.getName() + " already held on master. Check " +
@@ -1894,8 +1893,8 @@
       try {
         checkOpen();
       } catch (IOException e) {
-        // If checkOpen failed, cancel this lease; filesystem is gone or we're
-        // closing or something.
+        // If checkOpen failed, the server is not running or the filesystem is
+        // gone; cancel this lease.
         this.leases.cancelLease(scannerName);
         throw e;
       }
@@ -1928,11 +1927,10 @@
       synchronized(scanners) {
         s = scanners.remove(scannerName);
       }
-      if(s == null) {
-        throw new UnknownScannerException(scannerName);
+      if (s != null) {
+        s.close();
+        this.leases.cancelLease(scannerName);
       }
-      s.close();
-      this.leases.cancelLease(scannerName);
     } catch (Throwable t) {
       throw convertThrowableToIOE(cleanup(t));
     }
@@ -2040,10 +2038,10 @@
     }
     String lockName = String.valueOf(lockId);
     Integer rl = null;
-    synchronized(rowlocks) {
+    synchronized (rowlocks) {
       rl = rowlocks.get(lockName);
     }
-    if(rl == null) {
+    if (rl == null) {
       throw new IOException("Invalid row lock");
     }
     this.leases.renewLease(lockName);
@@ -2111,13 +2109,6 @@
     }
   }
 
-  /**
-   * @return Info on this server.
-   */
-  public HServerInfo getServerInfo() {
-    return this.serverInfo;
-  }
-
   /** @return the info server */
   public InfoServer getInfoServer() {
     return infoServer;
@@ -2467,8 +2458,7 @@
   public HServerInfo getHServerInfo() throws IOException {
     return serverInfo;
   }
-  
-  
+
   /**
    * @param args
    */

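The close(scannerName) change above makes scanner shutdown idempotent: closing an
unknown or already-closed scanner is now a no-op instead of raising
UnknownScannerException. A minimal caller sketch (hypothetical, against the
HRegionInterface RPC):

  static void scanAndClose(HRegionInterface rs, byte [] regionName, Scan scan)
  throws IOException {
    long scannerId = rs.openScanner(regionName, scan);
    try {
      // ... consume rows via rs.next(scannerId) ...
    } finally {
      rs.close(scannerId);
      rs.close(scannerId);  // a second close is now silently ignored
    }
  }
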
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Fri Jul 24 05:20:59 2009
@@ -463,7 +463,7 @@
    * @param mc True if this file is product of a major compaction
    * @throws IOException
    */
-  static void appendMetadata(final HFile.Writer w, final long maxSequenceId,
+  public static void appendMetadata(final HFile.Writer w, final long maxSequenceId,
     final boolean mc)
   throws IOException {
     w.appendFileInfo(MAX_SEQ_ID_KEY, Bytes.toBytes(maxSequenceId));

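appendMetadata is made public so writers outside the regionserver package -- the
migration tool below is the first such caller -- can stamp the sequence id and
major-compaction flag on HFiles they create. A condensed sketch of that pattern
(fs, familyDir, conf and maxSequenceId are assumed to be in scope):

  HFile.Writer w = StoreFile.getWriter(fs, familyDir,
    conf.getInt("hfile.min.blocksize.size", 64 * 1024),
    Compression.Algorithm.NONE, KeyValue.KEY_COMPARATOR);
  try {
    // ... append KeyValues in sorted order ...
  } finally {
    StoreFile.appendMetadata(w, maxSequenceId, false /* not a major compaction */);
    w.close();
  }
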
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/FSUtils.java Fri Jul 24 05:20:59 2009
@@ -193,11 +193,24 @@
    */
   public static void setVersion(FileSystem fs, Path rootdir) 
   throws IOException {
+    setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION);
+  }
+
+  /**
+   * Sets version of file system
+   * 
+   * @param fs
+   * @param rootdir
+   * @param version
+   * @throws IOException
+   */
+  public static void setVersion(FileSystem fs, Path rootdir, String version) 
+  throws IOException {
     FSDataOutputStream s =
       fs.create(new Path(rootdir, HConstants.VERSION_FILE_NAME));
-    s.writeUTF(HConstants.FILE_SYSTEM_VERSION);
+    s.writeUTF(version);
     s.close();
-    LOG.debug("Created version file to: " + rootdir.toString());
+    LOG.debug("Created version file at " + rootdir.toString() + " with version " + version);
   }
 
   /**

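The new setVersion overload lets a caller pin an explicit filesystem version
rather than always writing HConstants.FILE_SYSTEM_VERSION. A minimal sketch (conf
is assumed to be an HBaseConfiguration; the "7" version string is illustrative):

  FileSystem fs = FileSystem.get(conf);
  Path rootdir = FSUtils.getRootDir(conf);
  FSUtils.setVersion(fs, rootdir, "7");  // explicit version
  FSUtils.setVersion(fs, rootdir);       // defaults to HConstants.FILE_SYSTEM_VERSION
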
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/MetaUtils.java Fri Jul 24 05:20:59 2009
@@ -42,8 +42,6 @@
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -124,6 +122,7 @@
     HRegion meta = metaRegions.get(metaInfo.getRegionName());
     if (meta == null) {
       meta = openMetaRegion(metaInfo);
+      LOG.info("OPENING META " + meta.toString());
       this.metaRegions.put(metaInfo.getRegionName(), meta);
     }
     return meta;
@@ -146,6 +145,7 @@
     }
     try {
       for (HRegion r: metaRegions.values()) {
+        LOG.info("CLOSING META " + r.toString());
         r.close();
       }
     } catch (IOException e) {
@@ -192,24 +192,33 @@
     if (this.rootRegion == null) {
       openRootRegion();
     }
+    scanMetaRegion(this.rootRegion, listener);
+  }
 
+  /**
+   * Scan the passed in metaregion <code>r</code> invoking the passed
+   * <code>listener</code> per row found.
+   * @param r
+   * @param listener
+   * @throws IOException
+   */
+  public void scanMetaRegion(final HRegion r, final ScannerListener listener)
+  throws IOException {
     Scan scan = new Scan();
     scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    InternalScanner rootScanner = 
-      rootRegion.getScanner(scan);
-
+    InternalScanner s = r.getScanner(scan);
     try {
       List<KeyValue> results = new ArrayList<KeyValue>();
       boolean hasNext = true;
       do {
-        hasNext = rootScanner.next(results);
+        hasNext = s.next(results);
         HRegionInfo info = null;
         for (KeyValue kv: results) {
           info = Writables.getHRegionInfoOrNull(kv.getValue());
           if (info == null) {
-            LOG.warn("region info is null for row " +
-                Bytes.toString(kv.getRow()) + " in table " +
-                HConstants.ROOT_TABLE_NAME);
+            LOG.warn("Region info is null for row " +
+              Bytes.toString(kv.getRow()) + " in table " +
+              r.getTableDesc().getNameAsString());
           }
           continue;
         }
@@ -219,7 +228,7 @@
         results.clear();
       } while (hasNext);
     } finally {
-      rootScanner.close();
+      s.close();
     }
   }
 
@@ -229,6 +238,9 @@
    * TODO: Use Visitor rather than Listener pattern.  Allow multiple Visitors.
    * Use this everywhere we scan meta regions: e.g. in metascanners, in close
    * handling, etc.  Have it pass in the whole row, not just HRegionInfo.
+   * <p>Use for reading meta only.  Does not close region when done.
+   * Use {@link #getMetaRegion(HRegionInfo)} instead if writing.  Adds
+   * meta region to list that will get a close on {@link #shutdown()}.
    * 
    * @param metaRegionInfo HRegionInfo for meta region
    * @param listener method to be called for each meta region found
@@ -242,47 +254,6 @@
     scanMetaRegion(metaRegion, listener);
   }
 
-  /**
-   * Scan the passed in metaregion <code>m</code> invoking the passed
-   * <code>listener</code> per row found.
-   * @param m
-   * @param listener
-   * @throws IOException
-   */
-  public void scanMetaRegion(final HRegion m, final ScannerListener listener)
-  throws IOException {
-    
-    Scan scan = new Scan();
-    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    InternalScanner metaScanner = 
-      m.getScanner(scan);
-    
-    try {
-      List<KeyValue> results = new ArrayList<KeyValue>();
-      while (metaScanner.next(results)) {
-        HRegionInfo info = null;
-        for (KeyValue kv: results) {
-          if(kv.matchingColumn(HConstants.CATALOG_FAMILY,
-              HConstants.REGIONINFO_QUALIFIER)) {
-            info = Writables.getHRegionInfoOrNull(kv.getValue());
-            if (info == null) {
-              LOG.warn("region info is null for row " +
-                Bytes.toString(kv.getRow()) +
-                " in table " + HConstants.META_TABLE_NAME);
-            }
-            break;
-          }
-        }
-        if (!listener.processRow(info)) {
-          break;
-        }
-        results.clear();
-      }
-    } finally {
-      metaScanner.close();
-    }
-  }
-
   private synchronized HRegion openRootRegion() throws IOException {
     if (this.rootRegion != null) {
       return this.rootRegion;
@@ -506,4 +477,4 @@
     return Bytes.equals(n, HConstants.ROOT_TABLE_NAME) ||
       Bytes.equals(n, HConstants.META_TABLE_NAME);
   }
-}
\ No newline at end of file
+}

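With scanMetaRegion(HRegion, ScannerListener) now public, callers can walk any
catalog region with the same listener they use for the root region; Migrate below
relies on exactly this. A minimal sketch (conf and metaRegionInfo are assumed to
be in scope; the printed output is illustrative):

  final MetaUtils utils = new MetaUtils(conf);
  try {
    HRegion meta = utils.getMetaRegion(metaRegionInfo);
    utils.scanMetaRegion(meta, new MetaUtils.ScannerListener() {
      public boolean processRow(HRegionInfo info) throws IOException {
        System.out.println("Found region " + info.getRegionNameAsString());
        return true;  // return false to stop the scan early
      }
    });
  } finally {
    utils.shutdown();  // closes regions opened via getMetaRegion
  }
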
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Migrate.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Migrate.java?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Migrate.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/org/apache/hadoop/hbase/util/Migrate.java Fri Jul 24 05:20:59 2009
@@ -20,8 +20,9 @@
 
 package org.apache.hadoop.hbase.util;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.commons.cli.Options;
 import org.apache.commons.logging.Log;
@@ -30,14 +31,24 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.io.hfile.Compression;
+import org.apache.hadoop.hbase.io.hfile.HFile;
+import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
+import org.apache.hadoop.hbase.migration.nineteen.io.BloomFilterMapFile;
+import org.apache.hadoop.hbase.migration.nineteen.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.FSUtils.DirFilter;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
@@ -76,13 +87,9 @@
  */
 public class Migrate extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(Migrate.class);
-  private final HBaseConfiguration conf;
   private FileSystem fs;
-  
-  // Gets set by migration methods if we are in readOnly mode.
   boolean migrationNeeded = false;
-
-  boolean readOnly = false;
+  boolean check = false;
 
   // Filesystem version of hbase 0.1.x.
   private static final float HBASE_0_1_VERSION = 0.1f;
@@ -93,26 +100,27 @@
   private static final String MIGRATION_LINK = 
     " See http://wiki.apache.org/hadoop/Hbase/HowToMigrate for more information.";
 
-  /** default constructor */
+  /**
+   * Default constructor.
+   */
   public Migrate() {
-    this(new HBaseConfiguration());
+    super();
   }
-  
+
   /**
-   * @param conf
+   * @param c
    */
-  public Migrate(HBaseConfiguration conf) {
-    super(conf);
-    this.conf = conf;
+  public Migrate(final HBaseConfiguration c) {
+    super(c);
   }
-  
+
   /*
    * Sets the hbase rootdir as fs.default.name.
    * @return True if succeeded.
    */
   private boolean setFsDefaultName() {
     // Validate root directory path
-    Path rd = new Path(conf.get(HConstants.HBASE_DIR));
+    Path rd = new Path(getConf().get(HConstants.HBASE_DIR));
     try {
       // Validate root directory path
       FSUtils.validateRootPath(rd);
@@ -122,7 +130,7 @@
           " configuration parameter '" + HConstants.HBASE_DIR + "'", e);
       return false;
     }
-    this.conf.set("fs.default.name", rd.toString());
+    getConf().set("fs.default.name", rd.toString());
     return true;
   }
 
@@ -132,7 +140,7 @@
   private boolean verifyFilesystem() {
     try {
       // Verify file system is up.
-      fs = FileSystem.get(conf);                        // get DFS handle
+      fs = FileSystem.get(getConf());                        // get DFS handle
       LOG.info("Verifying that file system is available..");
       FSUtils.checkFileSystemAvailable(fs);
       return true;
@@ -147,7 +155,7 @@
     LOG.info("Verifying that HBase is not running... " +
           "Tries ten times to connect to a running master");
     try {
-      HBaseAdmin.checkHBaseAvailable(conf);
+      HBaseAdmin.checkHBaseAvailable((HBaseConfiguration)getConf());
       LOG.fatal("HBase cluster must be off-line.");
       return false;
     } catch (MasterNotRunningException e) {
@@ -165,15 +173,13 @@
     if (!verifyFilesystem()) {
       return -3;
     }
-    if (!notRunning()) {
-      return -4;
-    }
 
     try {
-      LOG.info("Starting upgrade" + (readOnly ? " check" : ""));
+      LOG.info("Starting upgrade" + (check ? " check" : ""));
 
       // See if there is a file system version file
-      String versionStr = FSUtils.getVersion(fs, FSUtils.getRootDir(this.conf));
+      String versionStr = FSUtils.getVersion(fs,
+        FSUtils.getRootDir((HBaseConfiguration)getConf()));
       if (versionStr == null) {
         throw new IOException("File system version file " +
             HConstants.VERSION_FILE_NAME +
@@ -193,31 +199,30 @@
         System.out.println(msg);
         throw new IOException(msg);
       }
-
+      this.migrationNeeded = true;
       migrate6to7();
-
-      if (!readOnly) {
+      if (!check) {
         // Set file system version
         LOG.info("Setting file system version.");
-        FSUtils.setVersion(fs, FSUtils.getRootDir(this.conf));
+        FSUtils.setVersion(fs, FSUtils.getRootDir((HBaseConfiguration)getConf()));
         LOG.info("Upgrade successful.");
       } else if (this.migrationNeeded) {
         LOG.info("Upgrade needed.");
       }
       return 0;
     } catch (Exception e) {
-      LOG.fatal("Upgrade" +  (readOnly ? " check" : "") + " failed", e);
+      LOG.fatal("Upgrade" +  (check ? " check" : "") + " failed", e);
       return -1;
     }
   }
   
   // Move the filesystem version from 6 to 7.
   private void migrate6to7() throws IOException {
-    if (this.readOnly && this.migrationNeeded) {
+    if (this.check && this.migrationNeeded) {
       return;
     }
     // Before we start, make sure all is major compacted.
-    Path hbaseRootDir = new Path(conf.get(HConstants.HBASE_DIR));
+    Path hbaseRootDir = new Path(getConf().get(HConstants.HBASE_DIR));
     boolean pre020 = FSUtils.isPre020FileLayout(fs, hbaseRootDir);
     if (pre020) {
       LOG.info("Checking pre020 filesystem is major compacted");
@@ -241,51 +246,66 @@
         throw new IOException(msg);
       }
     }
-    // TOOD: Verify all has been brought over from old to new layout.
-    final MetaUtils utils = new MetaUtils(this.conf);
+    final MetaUtils utils = new MetaUtils((HBaseConfiguration)getConf());
+    final List<HRegionInfo> metas = new ArrayList<HRegionInfo>();
     try {
-      // TODO: Set the .META. and -ROOT- to flush at 16k?  32k?
-      // TODO: Enable block cache on all tables
-      // TODO: Rewrite MEMCACHE_FLUSHSIZE as MEMSTORE_FLUSHSIZE – name has changed. 
-      // TODO: Remove tableindexer 'index' attribute index from TableDescriptor (See HBASE-1586) 
-      // TODO: TODO: Move of in-memory parameter from table to column family (from HTD to HCD). 
-      // TODO: Purge isInMemory, etc., methods from HTD as part of migration. 
-      // TODO: Clean up old region log files (HBASE-698) 
-      
-      updateVersions(utils.getRootRegion().getRegionInfo());
-      enableBlockCache(utils.getRootRegion().getRegionInfo());
-      // Scan the root region
+      // Rewrite root.
+      rewriteHRegionInfo(utils.getRootRegion().getRegionInfo());
+      // Scan the root region to rewrite metas.
       utils.scanRootRegion(new MetaUtils.ScannerListener() {
         public boolean processRow(HRegionInfo info)
         throws IOException {
-          if (readOnly && !migrationNeeded) {
+          if (check && !migrationNeeded) {
             migrationNeeded = true;
             return false;
           }
-          updateVersions(utils.getRootRegion(), info);
-          enableBlockCache(utils.getRootRegion(), info);
+          metas.add(info);
+          rewriteHRegionInfo(utils.getRootRegion(), info);
           return true;
         }
       });
-      LOG.info("TODO: Note on make sure not using old hbase-default.xml");
-      /*
-       * hbase.master / hbase.master.hostname are obsolete, that's replaced by
-hbase.cluster.distributed. This config must be set to "true" to have a
-fully-distributed cluster and the server lines in zoo.cfg must not
-point to "localhost".
-
-The clients must have a valid zoo.cfg in their classpath since we
-don't provide the master address.
-
-hbase.master.dns.interface and hbase.master.dns.nameserver should be
-set to control the master's address (not mandatory).
-       */
-      LOG.info("TODO: Note on zookeeper config. before starting:");
+      // Scan meta to rewrite table stuff.
+      for (HRegionInfo hri: metas) {
+        final HRegion h = utils.getMetaRegion(hri);
+        utils.scanMetaRegion(h, new MetaUtils.ScannerListener() {
+          public boolean processRow(HRegionInfo info) throws IOException {
+            if (check && !migrationNeeded) {
+              migrationNeeded = true;
+              return false;
+            }
+            rewriteHRegionInfo(h, info);
+            return true;
+          }
+        });
+      }
+      cleanOldLogFiles(hbaseRootDir);
     } finally {
       utils.shutdown();
     }
   }
-  
+
+  /*
+   * Remove old log files.
+   * @param hbaseRootDir
+   * @throws IOException
+   */
+  private void cleanOldLogFiles(final Path hbaseRootDir)
+  throws IOException {
+    FileStatus [] oldlogfiles = fs.listStatus(hbaseRootDir, new PathFilter() {
+      public boolean accept(Path p) {
+        return p.getName().startsWith("log_");
+      }
+    });
+    // Return if nothing to do.
+    if (oldlogfiles == null || oldlogfiles.length <= 0) return;
+    LOG.info("Removing " + oldlogfiles.length + " old log files");
+    for (int i = 0; i < oldlogfiles.length; i++) {
+      fs.delete(oldlogfiles[i].getPath(), true);
+      LOG.info("Deleted: " + oldlogfiles[i].getPath());
+    }
+  }
+
   /*
    * Rewrite all under hbase root dir.
    * Presumes that {@link FSUtils#isMajorCompactedPre020(FileSystem, Path)}
@@ -316,30 +336,84 @@
           if (mfs.length > 1) {
             throw new IOException("Should only be one directory in: " + mfdir);
           }
-          Path mf = mfs[0].getPath();
-          Path infofile = new Path(new Path(family, "info"), mf.getName());
-          rewrite(this.fs, mf, infofile);
+          if (mfs.length == 0) {
+            // Special case.  Empty region.  Remove the mapfiles and info dirs.
+            Path infodir = new Path(family, "info");
+            LOG.info("Removing " + mfdir + " and " + infodir + " because empty");
+            fs.delete(mfdir, true);
+            fs.delete(infodir, true);
+          } else {
+            rewrite((HBaseConfiguration)getConf(), this.fs, mfs[0].getPath());
+          }
         }
       }
     }
   }
-  
+
   /**
-   * Rewrite the passed mapfile
-   * @param mapfiledir
-   * @param infofile
+   * Rewrite the passed 0.19 mapfile as a 0.20 file.
+   * @param conf
+   * @param fs
+   * @param mf
   * @throws IOException
    */
-  public static void rewrite (final FileSystem fs, final Path mapfiledir,
-      final Path infofile)
+  public static void rewrite (final HBaseConfiguration conf, final FileSystem fs,
+    final Path mf)
   throws IOException {
-    if (!fs.exists(mapfiledir)) {
-      throw new FileNotFoundException(mapfiledir.toString());
-    }
-    if (!fs.exists(infofile)) {
-      throw new FileNotFoundException(infofile.toString());
+    Path familydir = mf.getParent().getParent();
+    Path regiondir = familydir.getParent();
+    Path basedir = regiondir.getParent();
+    if (HStoreFile.isReference(mf)) {
+      throw new IOException(mf.toString() + " is Reference");
+    }
+    HStoreFile hsf = new HStoreFile(conf, fs, basedir,
+      Integer.parseInt(regiondir.getName()),
+      Bytes.toBytes(familydir.getName()), Long.parseLong(mf.getName()), null);
+    BloomFilterMapFile.Reader src = hsf.getReader(fs, false, false);
+    HFile.Writer tgt = StoreFile.getWriter(fs, familydir,
+      conf.getInt("hfile.min.blocksize.size", 64*1024),
+      Compression.Algorithm.NONE, getComparator(basedir));
+    // From old 0.19 HLogEdit.
+    ImmutableBytesWritable deleteBytes =
+      new ImmutableBytesWritable("HBASE::DELETEVAL".getBytes("UTF-8"));
+    try {
+      while (true) {
+        HStoreKey key = new HStoreKey();
+        ImmutableBytesWritable value = new ImmutableBytesWritable();
+        if (!src.next(key, value)) {
+          break;
+        }
+        byte [][] parts = KeyValue.parseColumn(key.getColumn());
+        KeyValue kv = deleteBytes.equals(value)?
+            new KeyValue(key.getRow(), parts[0], parts[1], 
+                key.getTimestamp(), KeyValue.Type.Delete):
+              new KeyValue(key.getRow(), parts[0], parts[1], 
+                key.getTimestamp(), value.get());
+         tgt.append(kv);
+      }
+      long seqid = hsf.loadInfo(fs);
+      StoreFile.appendMetadata(tgt, seqid, 
+          hsf.isMajorCompaction());
+      // Success, delete src.
+      src.close();
+      tgt.close();
+      hsf.delete();
+      // If we rewrote src, delete mapfiles and info dir.
+      fs.delete(mf.getParent(), true);
+      fs.delete(new Path(familydir, "info"), true);
+      LOG.info("Rewrote " + mf.toString() + " as " + tgt.toString());
+    } catch (IOException e) {
+      // If error, delete tgt and rethrow; a failed rewrite must not pass silently.
+      src.close();
+      tgt.close();
+      fs.delete(tgt.getPath(), true);
+      throw e;
     }
-    
+  }
+
+  private static KeyValue.KeyComparator getComparator(final Path tabledir) {
+    String tablename = tabledir.getName();
+    return tablename.equals("-ROOT-")? KeyValue.META_KEY_COMPARATOR:
+      tablename.equals(".META.")? KeyValue.META_KEY_COMPARATOR:
+        KeyValue.KEY_COMPARATOR;
   }
 
   /*
@@ -347,33 +421,41 @@
    * @param mr
    * @param oldHri
    */
-  void enableBlockCache(HRegion mr, HRegionInfo oldHri)
+  void rewriteHRegionInfo(HRegion mr, HRegionInfo oldHri)
   throws IOException {
-    if (!enableBlockCache(oldHri)) {
+    if (!rewriteHRegionInfo(oldHri)) {
       return;
     }
     Put put = new Put(oldHri.getRegionName());
     put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, 
         Writables.getBytes(oldHri));
     mr.put(put);
-    LOG.info("Enabled blockcache on " + oldHri.getRegionNameAsString());
   }
 
   /*
   * @param hri HRegionInfo to rewrite.
   * @return True if we changed a value.
    */
-  private boolean enableBlockCache(final HRegionInfo hri) {
+  private boolean rewriteHRegionInfo(final HRegionInfo hri) {
     boolean result = false;
-    HColumnDescriptor hcd =
-      hri.getTableDesc().getFamily(HConstants.CATALOG_FAMILY);
-    if (hcd == null) {
-      LOG.info("No info family in: " + hri.getRegionNameAsString());
-      return result;
+    // Set flush size at 32k if a catalog table.
+    int catalogMemStoreFlushSize = 32 * 1024;
+    if (hri.isMetaRegion() &&
+        hri.getTableDesc().getMemStoreFlushSize() != catalogMemStoreFlushSize) {
+      hri.getTableDesc().setMemStoreFlushSize(catalogMemStoreFlushSize);
+      result = true;
     }
-    // Set blockcache enabled.
-    hcd.setBlockCacheEnabled(true);
-    return true;
+    // Remove the old MEMCACHE_FLUSHSIZE if present
+    hri.getTableDesc().remove(Bytes.toBytes("MEMCACHE_FLUSHSIZE"));
+    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+      // Set block cache on all tables.
+      hcd.setBlockCacheEnabled(true);
+      // Set compression to NONE.  The previous value was lowercase 'none' and
+      // needs to be upper-case.  Any other compression is turned off here; the
+      // user must re-enable it after migration.
+      hcd.setCompressionType(Algorithm.NONE);
+      result = true;
+    }
+    return result;
   }
 
 
@@ -431,7 +513,7 @@
       return -1;
     }
     if (remainingArgs[0].compareTo("check") == 0) {
-      this.readOnly = true;
+      this.check = true;
     } else if (remainingArgs[0].compareTo("upgrade") != 0) {
       usage();
       return -1;
@@ -459,11 +541,11 @@
   public static void main(String[] args) {
     int status = 0;
     try {
-      status = ToolRunner.run(new Migrate(), args);
+      status = ToolRunner.run(new HBaseConfiguration(), new Migrate(), args);
     } catch (Exception e) {
       LOG.error(e);
       status = -1;
     }
     System.exit(status);
   }
-}
\ No newline at end of file
+}

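Passing the HBaseConfiguration to ToolRunner (rather than having Migrate build its
own) lets GenericOptionsParser apply command-line overrides before run() executes.
A minimal sketch of driving the tool programmatically (mirrors main()):

  int status = ToolRunner.run(new HBaseConfiguration(), new Migrate(),
    new String [] {"check"});  // dry run; pass "upgrade" to migrate for real
  System.exit(status);
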
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/overview.html
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/overview.html?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/overview.html (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/java/overview.html Fri Jul 24 05:20:59 2009
@@ -24,6 +24,16 @@
 <body bgcolor="white">
 <a href="http://hbase.org">HBase</a> is a scalable, distributed database built on <a href="http://hadoop.apache.org/core">Hadoop Core</a>.
 
+<h2>Table of Contents</h2>
+<ul>
+<li><a href="#requirements">Requirements</a></li>
+<li><a href="#getting_started" >Getting Started</a></li>
+<li><a href="#runandconfirm">Running and Confirming Your Installation</a></li>
+<li><a href="#upgrading" >Upgrading</a></li>
+<li><a href="#client_example">Example API Usage</a></li>
+<li><a href="#related" >Related Documentation</a></li>
+</ul>
+
 <h2><a name="requirements">Requirements</a></h2>
 <ul>
   <li>Java 1.6.x, preferably from <a href="http://www.java.com/en/download/">Sun</a>.
@@ -37,11 +47,11 @@
   </li>
       <li>HBase depends on <a href="http://hadoop.apache.org/zookeeper/">ZooKeeper</a> as of release 0.20.0.
       Clients and Servers now must know their ZooKeeper Quorum locations before
-      they can do anything else.
-      In basic standalone and pseudo-distributed modes, HBase manages a ZooKeeper instance
-      for you but it is required that you run a ZooKeeper Quorum when running HBase
-      fully distributed (More on this below). The Zookeeper addition changes
-      how some core HBase configuration is done.
+      they can do anything else (usually they pick up this information from configuration
+      supplied on their CLASSPATH).  By default, HBase will manage a single ZooKeeper instance for you.
+      In basic standalone and pseudo-distributed modes this is usually enough, but for fully
+      distributed mode you should configure a ZooKeeper quorum (more info below).
+      In addition, ZooKeeper changes how some core HBase configuration is done.
       </li>
   <li>Hosts must be able to resolve the fully-qualified domain name of the master.</li>
   <li>HBase currently is a file handle hog.  The usual default of
@@ -102,20 +112,20 @@
 set the heapsize for HBase, etc.  At a minimum, set <code>JAVA_HOME</code> to point at the root of
 your Java installation.
 </p>
-<h2><a name="standalone">Standalone Mode</a></h2>
+<h3><a name="standalone">Standalone Mode</a></h3>
 <p>
 If you are running a standalone operation, there should be nothing further to configure; proceed to
 <a href=#runandconfirm>Running and Confirming Your Installation</a>.  If you are running a distributed 
 operation, continue reading.
 </p>
 
-<h2><a name="distributed">Distributed Operation: Pseudo- and Fully-Distributed Modes</a></h2>
+<h3><a name="distributed">Distributed Operation: Pseudo- and Fully-Distributed Modes</a></h3>
 <p>Distributed mode requires an instance of the Hadoop Distributed File System (DFS).
 See the Hadoop <a href="http://lucene.apache.org/hadoop/api/overview-summary.html#overview_description">
 requirements and instructions</a> for how to set up a DFS.
 </p>
 
-<h3><a name="pseudo-distrib">Pseudo-Distributed Operation</a></h3>
+<h4><a name="pseudo-distrib">Pseudo-Distributed Operation</a></h4>
 <p>A pseudo-distributed operation is simply a distributed operation run on a single host.  
 Once you have confirmed your DFS setup, configuring HBase for use on one host requires modification of 
 <code>${HBASE_HOME}/conf/hbase-site.xml</code>, which needs to be pointed at the running Hadoop DFS instance.  
@@ -330,7 +340,7 @@
 </p>
 
 <h2><a name="client_example">Example API Usage</a></h2>
-For sample Java code, see <a href="org/apache/hadoop/hbase/client/package-summary.html#client_example">org.apache.hadoop.hbase.client</a> documentation.
+For sample Java code, see <a href="org/apache/hadoop/hbase/client/package-summary.html#package_description">org.apache.hadoop.hbase.client</a> documentation.
 
 <p>If your client is NOT Java, consider the Thrift or REST libraries.</p>
 
@@ -339,6 +349,11 @@
   <li><a href="http://hbase.org">HBase Home Page</a>
   <li><a href="http://wiki.apache.org/hadoop/Hbase">HBase Wiki</a>
   <li><a href="http://hadoop.apache.org/">Hadoop Home Page</a>
+  <li><a href="http://wiki.apache.org/hadoop/Hbase/MultipleMasters">Setting up Multiple HBase Masters</a>
+  <li><a href="http://wiki.apache.org/hadoop/Hbase/RollingRestart">Rolling Upgrades</a>
+  <li><a href="org/apache/hadoop/hbase/client/transactional/package-summary.html#package_description">Transactional HBase</a>
+  <li><a href="org/apache/hadoop/hbase/client/tableindexed/package-summary.html">Table Indexed HBase</a>
+  <li><a href="org/apache/hadoop/hbase/stargate/package-summary.html#package_description">Stargate</a> -- a RESTful Web service front end for HBase.
 </ul>
 
 </body>

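Since clients now learn the ZooKeeper quorum from configuration on their
CLASSPATH, a 0.20-style client needs no explicit master address. A minimal sketch
(table and family names are hypothetical):

  HBaseConfiguration conf = new HBaseConfiguration();  // reads hbase-site.xml off the CLASSPATH
  HTable table = new HTable(conf, "myTable");
  Put put = new Put(Bytes.toBytes("row1"));
  put.add(Bytes.toBytes("myFamily"), Bytes.toBytes("q"), Bytes.toBytes("value"));
  table.put(put);
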
Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Fri Jul 24 05:20:59 2009
@@ -112,7 +112,11 @@
    */
   public void abortRegionServer(int serverNumber) {
     HRegionServer server = getRegionServer(serverNumber);
-    LOG.info("Aborting " + server.getServerInfo().toString());
+    try {
+      LOG.info("Aborting " + server.getHServerInfo().toString());
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
     server.abort();
   }
 

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestBytes.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestBytes.java?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestBytes.java (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/test/org/apache/hadoop/hbase/util/TestBytes.java Fri Jul 24 05:20:59 2009
@@ -25,6 +25,17 @@
 import junit.framework.TestCase;
 
 public class TestBytes extends TestCase {
+  public void testNullHashCode() {
+    byte [] b = null;
+    Exception ee = null;
+    try {
+      Bytes.hashCode(b);
+    } catch (Exception e) {
+      ee = e;
+    }
+    assertNotNull(ee);
+  }
+
   public void testSplit() throws Exception {
     byte [] lowest = Bytes.toBytes("AAA");
     byte [] middle = Bytes.toBytes("CCC");

Modified: hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/regionserver/regionserver.jsp
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/regionserver/regionserver.jsp?rev=797323&r1=797322&r2=797323&view=diff
==============================================================================
--- hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/regionserver/regionserver.jsp (original)
+++ hadoop/hbase/trunk_on_hadoop-0.18.3/src/webapps/regionserver/regionserver.jsp Fri Jul 24 05:20:59 2009
@@ -1,5 +1,6 @@
 <%@ page contentType="text/html;charset=UTF-8"
   import="java.util.*"
+  import="java.io.IOException"
   import="org.apache.hadoop.io.Text"
   import="org.apache.hadoop.hbase.regionserver.HRegionServer"
   import="org.apache.hadoop.hbase.regionserver.HRegion"
@@ -10,7 +11,12 @@
   import="org.apache.hadoop.hbase.HServerLoad"
   import="org.apache.hadoop.hbase.HRegionInfo" %><%
   HRegionServer regionServer = (HRegionServer)getServletContext().getAttribute(HRegionServer.REGIONSERVER);
-  HServerInfo serverInfo = regionServer.getServerInfo();
+  HServerInfo serverInfo = null;
+  try {
+    serverInfo = regionServer.getHServerInfo();
+  } catch (IOException e) {
+    e.printStackTrace();
+  }
   RegionServerMetrics metrics = regionServer.getMetrics();
   Collection<HRegionInfo> onlineRegions = regionServer.getSortedOnlineRegionInfos();
   int interval = regionServer.getConfiguration().getInt("hbase.regionserver.msginterval", 3000)/1000;