Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2007/06/04 19:14:13 UTC

svn commit: r544188 [2/3] - in /lucene/hadoop/trunk/src/contrib/hbase: ./ conf/ src/java/org/apache/hadoop/hbase/ src/test/org/apache/hadoop/hbase/

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java?view=diff&rev=544188&r1=544187&r2=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java Mon Jun  4 10:14:10 2007
@@ -62,7 +62,7 @@
   static int MIN_COMMITS_FOR_COMPACTION = 10;
   static Random rand = new Random();
 
-  private static final Log LOG = LogFactory.getLog(HRegion.class);
+  static final Log LOG = LogFactory.getLog(HRegion.class);
 
   /**
    * Deletes all the files for a HRegion
@@ -74,6 +74,7 @@
    */
   public static void deleteRegion(FileSystem fs, Path baseDirectory,
       Text regionName) throws IOException {
+    LOG.debug("Deleting region " + regionName);
     fs.delete(HStoreFile.getHRegionDir(baseDirectory, regionName));
   }
   
@@ -134,14 +135,10 @@
         + (endKey == null ? "" : endKey) + "'");
     
     // Flush each of the sources, and merge their files into a single 
-    // target for each column family.
-
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("flushing and getting file names for region " + srcA.getRegionName());
-    }
-    
+    // target for each column family.    
     TreeSet<HStoreFile> alreadyMerged = new TreeSet<HStoreFile>();
-    TreeMap<Text, Vector<HStoreFile>> filesToMerge = new TreeMap<Text, Vector<HStoreFile>>();
+    TreeMap<Text, Vector<HStoreFile>> filesToMerge =
+      new TreeMap<Text, Vector<HStoreFile>>();
     for(HStoreFile src: srcA.flushcache(true)) {
       Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
       if(v == null) {
@@ -151,10 +148,6 @@
       v.add(src);
     }
     
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("flushing and getting file names for region " + srcB.getRegionName());
-    }
-    
     for(HStoreFile src: srcB.flushcache(true)) {
       Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
       if(v == null) {
@@ -187,6 +180,7 @@
     }
 
     filesToMerge.clear();
+    
     for(HStoreFile src: srcA.close()) {
       if(! alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
@@ -330,6 +324,7 @@
   int maxUnflushedEntries = 0;
   int compactionThreshold = 0;
   private final HLocking lock = new HLocking();
+  private long desiredMaxFileSize;
 
   //////////////////////////////////////////////////////////////////////////////
   // Constructor
@@ -382,7 +377,6 @@
     // Load in all the HStores.
     for(Map.Entry<Text, HColumnDescriptor> e :
         this.regionInfo.tableDesc.families().entrySet()) {
-      
       Text colFamily = HStoreKey.extractFamily(e.getKey());
       stores.put(colFamily, new HStore(dir, this.regionInfo.regionName,
           e.getValue(), fs, oldLogFile, conf));
@@ -404,7 +398,12 @@
     
     // By default, we compact the region if an HStore has more than 10 map files
     
-    this.compactionThreshold = conf.getInt("hbase.hregion.compactionThreshold", 10);
+    this.compactionThreshold =
+      conf.getInt("hbase.hregion.compactionThreshold", 10);
+    
+    // By default we split region if a file > DEFAULT_MAX_FILE_SIZE.
+    this.desiredMaxFileSize =
+      conf.getLong("hbase.hregion.max.filesize", DEFAULT_MAX_FILE_SIZE);
 
     // HRegion is ready to go!
     this.writestate.writesOngoing = false;
@@ -448,6 +447,7 @@
           try {
             writestate.wait();
           } catch (InterruptedException iex) {
+            // continue
           }
         }
         writestate.writesOngoing = true;
@@ -456,24 +456,21 @@
 
       if(! shouldClose) {
         return null;
+      }
+      LOG.info("closing region " + this.regionInfo.regionName);
+      Vector<HStoreFile> allHStoreFiles = internalFlushcache();
+      for (HStore store: stores.values()) {
+        store.close();
+      }
+      try {
+        return allHStoreFiles;
 
-      } else {
-        LOG.info("closing region " + this.regionInfo.regionName);
-        Vector<HStoreFile> allHStoreFiles = internalFlushcache();
-        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
-          HStore store = it.next();
-          store.close();
-        }
-        try {
-          return allHStoreFiles;
-
-        } finally {
-          synchronized(writestate) {
-            writestate.closed = true;
-            writestate.writesOngoing = false;
-          }
-          LOG.info("region " + this.regionInfo.regionName + " closed");
+      } finally {
+        synchronized (writestate) {
+          writestate.closed = true;
+          writestate.writesOngoing = false;
         }
+        LOG.info("region " + this.regionInfo.regionName + " closed");
       }
     } finally {
       lock.releaseWriteLock();
@@ -493,10 +490,11 @@
         && (regionInfo.startKey.compareTo(midKey) > 0))
         || ((regionInfo.endKey.getLength() != 0)
             && (regionInfo.endKey.compareTo(midKey) < 0))) {
-      throw new IOException("Region splitkey must lie within region boundaries.");
+      throw new IOException("Region splitkey must lie within region " +
+        "boundaries.");
     }
 
-    LOG.info("splitting region " + this.regionInfo.regionName);
+    LOG.info("Splitting region " + this.regionInfo.regionName);
 
     Path splits = new Path(regiondir, SPLITDIR);
     if(! fs.exists(splits)) {
@@ -524,39 +522,41 @@
     // Flush this HRegion out to storage, and turn off flushes
     // or compactions until close() is called.
     
+    // TODO: flushcache can come back null if it can't do the flush. FIX.
     Vector<HStoreFile> hstoreFilesToSplit = flushcache(true);
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
-      HStoreFile hsf = it.next();
-      
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("splitting HStore " + hsf.getRegionName() + "/"
-            + hsf.getColFamily() + "/" + hsf.fileId());
+    for(HStoreFile hsf: hstoreFilesToSplit) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Splitting HStore " + hsf.getRegionName() + "/" +
+          hsf.getColFamily() + "/" + hsf.fileId());
       }
-
       HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName, 
-          hsf.getColFamily(), Math.abs(rand.nextLong()));
-      
+        hsf.getColFamily(), Math.abs(rand.nextLong()));
       HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName, 
-          hsf.getColFamily(), Math.abs(rand.nextLong()));
-      
+        hsf.getColFamily(), Math.abs(rand.nextLong()));
       hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
       alreadySplit.add(hsf);
     }
 
     // We just copied most of the data.
-    // Notify the caller that we are about to close the region
     
-    listener.regionIsUnavailable(this.getRegionName());
+    // Notify the caller that we are about to close the region
+    listener.closing(this.getRegionName());
     
-    // Now close the HRegion and copy the small remainder
+    // Wait on the last row updates to come in.
+    waitOnRowLocks();
     
+    // Now close the HRegion
     hstoreFilesToSplit = close();
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
-      HStoreFile hsf = it.next();
-      
+    
+    // Tell listener that region is now closed and that they can therefore
+    // clean up any outstanding references.
+    listener.closed(this.getRegionName());
+    
+    // Copy the small remainder
+    for(HStoreFile hsf: hstoreFilesToSplit) {
       if(! alreadySplit.contains(hsf)) {
         if(LOG.isDebugEnabled()) {
-          LOG.debug("splitting HStore " + hsf.getRegionName() + "/"
+          LOG.debug("Splitting HStore " + hsf.getRegionName() + "/"
               + hsf.getColFamily() + "/" + hsf.fileId());
         }
 
@@ -585,8 +585,9 @@
     regions[0] = regionA;
     regions[1] = regionB;
     
-    LOG.info("region split complete. new regions are: " + regions[0].getRegionName()
-        + ", " + regions[1].getRegionName());
+    LOG.info("Region split of " + this.regionInfo.regionName + " complete. " +
+      "New regions are: " + regions[0].getRegionName() + ", " +
+      regions[1].getRegionName());
     
     return regions;
   }
@@ -653,22 +654,19 @@
    */
   public boolean needsSplit(Text midKey) {
     lock.obtainReadLock();
-
     try {
       Text key = new Text();
       long maxSize = 0;
-
-      for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) {
-        long size = i.next().getLargestFileSize(key);
-
+      for(HStore store: stores.values()) {
+        long size = store.getLargestFileSize(key);
         if(size > maxSize) {                      // Largest so far
           maxSize = size;
           midKey.set(key);
         }
       }
 
-      return (maxSize > (DESIRED_MAX_FILE_SIZE + (DESIRED_MAX_FILE_SIZE / 2)));
-      
+      return (maxSize >
+        (this.desiredMaxFileSize + (this.desiredMaxFileSize / 2)));
     } finally {
       lock.releaseReadLock();
     }
@@ -701,16 +699,16 @@
    */
   public boolean needsCompaction() {
     boolean needsCompaction = false;
-    lock.obtainReadLock();
+    this.lock.obtainReadLock();
     try {
-      for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) {
-        if(i.next().getNMaps() > compactionThreshold) {
+      for(HStore store: stores.values()) {
+        if(store.getNMaps() > this.compactionThreshold) {
           needsCompaction = true;
           break;
         }
       }
     } finally {
-      lock.releaseReadLock();
+      this.lock.releaseReadLock();
     }
     return needsCompaction;
   }
@@ -732,41 +730,35 @@
     boolean shouldCompact = false;
     lock.obtainReadLock();
     try {
-      synchronized(writestate) {
-        if((! writestate.writesOngoing)
-            && writestate.writesEnabled
-            && (! writestate.closed)
-            && recentCommits > MIN_COMMITS_FOR_COMPACTION) {
-
+      synchronized (writestate) {
+        if ((!writestate.writesOngoing) &&
+            writestate.writesEnabled &&
+            (!writestate.closed) &&
+            recentCommits > MIN_COMMITS_FOR_COMPACTION) {
           writestate.writesOngoing = true;
           shouldCompact = true;
         }
       }
-    } finally {
-      lock.releaseReadLock();
-    }
 
-    if(! shouldCompact) {
-      LOG.info("not compacting region " + this.regionInfo.regionName);
-      return false; 
-    }
-    lock.obtainWriteLock();
-    try {
-      LOG.info("starting compaction on region " + this.regionInfo.regionName);
-      for (Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
-        HStore store = it.next();
+      if (!shouldCompact) {
+        LOG.info("not compacting region " + this.regionInfo);
+        return false;
+      }
+
+      LOG.info("starting compaction on region " + this.regionInfo);
+      for (HStore store : stores.values()) {
         store.compact();
       }
-      LOG.info("compaction completed on region " + this.regionInfo.regionName);
+      LOG.info("compaction completed on region " + this.regionInfo);
       return true;
-
+      
     } finally {
+      lock.releaseReadLock();
       synchronized (writestate) {
         writestate.writesOngoing = false;
         recentCommits = 0;
         writestate.notifyAll();
       }
-      lock.releaseWriteLock();
     }
   }
 
@@ -800,7 +792,8 @@
    * This method may block for some time, so it should not be called from a 
    * time-sensitive thread.
    */
-  public Vector<HStoreFile> flushcache(boolean disableFutureWrites) throws IOException {
+  public Vector<HStoreFile> flushcache(boolean disableFutureWrites)
+  throws IOException {
     boolean shouldFlush = false;
     synchronized(writestate) {
       if((! writestate.writesOngoing)
@@ -818,45 +811,45 @@
     
     if(! shouldFlush) {
       if(LOG.isDebugEnabled()) {
-        LOG.debug("not flushing cache for region " + this.regionInfo.regionName);
+        LOG.debug("not flushing cache for region " +
+          this.regionInfo.regionName);
       }
-      return null;
-      
-    } else {
-      try {
-        return internalFlushcache();
-        
-      } finally {
-        synchronized(writestate) {
-          writestate.writesOngoing = false;
-          writestate.notifyAll();
-        }
+      return null;  
+    }
+    
+    try {
+      return internalFlushcache();
+
+    } finally {
+      synchronized (writestate) {
+        writestate.writesOngoing = false;
+        writestate.notifyAll();
       }
     }
   }
 
   /**
-   * Flushing the cache is a little tricky.  We have a lot of updates in the 
-   * HMemcache, all of which have also been written to the log.  We need to write
-   * those updates in the HMemcache out to disk, while being able to process 
-   * reads/writes as much as possible during the flush operation.  Also, the log
+   * Flushing the cache is a little tricky. We have a lot of updates in the
+   * HMemcache, all of which have also been written to the log. We need to write
+   * those updates in the HMemcache out to disk, while being able to process
+   * reads/writes as much as possible during the flush operation. Also, the log
    * has to state clearly the point in time at which the HMemcache was flushed.
    * (That way, during recovery, we know when we can rely on the on-disk flushed
    * structures and when we have to recover the HMemcache from the log.)
-   *
+   * 
    * So, we have a three-step process:
-   *
+   * 
    * A. Flush the memcache to the on-disk stores, noting the current sequence ID
-   *    for the log.
-   *    
-   * B. Write a FLUSHCACHE-COMPLETE message to the log, using the sequence ID 
-   *    that was current at the time of memcache-flush.
-   *    
-   * C. Get rid of the memcache structures that are now redundant, as they've 
-   *    been flushed to the on-disk HStores.
-   *
+   * for the log.
+   * 
+   * B. Write a FLUSHCACHE-COMPLETE message to the log, using the sequence ID
+   * that was current at the time of memcache-flush.
+   * 
+   * C. Get rid of the memcache structures that are now redundant, as they've
+   * been flushed to the on-disk HStores.
+   * 
    * This method is protected, but can be accessed via several public routes.
-   *
+   * 
    * This method may block for some time.
    */
   Vector<HStoreFile> internalFlushcache() throws IOException {
@@ -884,8 +877,7 @@
     HMemcache.Snapshot retval = memcache.snapshotMemcacheForLog(log);
     TreeMap<HStoreKey, BytesWritable> memcacheSnapshot = retval.memcacheSnapshot;
     if(memcacheSnapshot == null) {
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
-        HStore hstore = it.next();
+      for(HStore hstore: stores.values()) {
         Vector<HStoreFile> hstoreFiles = hstore.getAllMapFiles();
         allHStoreFiles.addAll(0, hstoreFiles);
       }
@@ -944,12 +936,7 @@
   /** Fetch a single data item. */
   public BytesWritable get(Text row, Text column) throws IOException {
     BytesWritable[] results = get(row, column, Long.MAX_VALUE, 1);
-    if(results == null) {
-      return null;
-      
-    } else {
-      return results[0];
-    }
+    return (results == null)? null: results[0];
   }
   
   /** Fetch multiple versions of a single data item */
@@ -972,13 +959,13 @@
 
     // Obtain the row-lock
 
-    obtainLock(row);
+    obtainRowLock(row);
     try {
       // Obtain the -col results
       return get(new HStoreKey(row, column, timestamp), numVersions);
     
     } finally {
-      releaseLock(row);
+      releaseRowLock(row);
     }
   }
 
@@ -1042,7 +1029,8 @@
    * Return an iterator that scans over the HRegion, returning the indicated 
    * columns.  This Iterator must be closed by the caller.
    */
-  public HInternalScannerInterface getScanner(Text[] cols, Text firstRow) throws IOException {
+  public HInternalScannerInterface getScanner(Text[] cols, Text firstRow)
+  throws IOException {
     lock.obtainReadLock();
     try {
       TreeSet<Text> families = new TreeSet<Text>();
@@ -1052,12 +1040,10 @@
 
       HStore[] storelist = new HStore[families.size()];
       int i = 0;
-      for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
-        Text family = it.next();
+      for (Text family: families) {
         storelist[i++] = stores.get(family);
       }
       return new HScanner(cols, firstRow, memcache, storelist);
-      
     } finally {
       lock.releaseReadLock();
     }
@@ -1068,26 +1054,26 @@
   //////////////////////////////////////////////////////////////////////////////
   
   /**
-   * The caller wants to apply a series of writes to a single row in the HRegion.
-   * The caller will invoke startUpdate(), followed by a series of calls to 
-   * put/delete, then finally either abort() or commit().
+   * The caller wants to apply a series of writes to a single row in the
+   * HRegion. The caller will invoke startUpdate(), followed by a series of
+   * calls to put/delete, then finally either abort() or commit().
    *
-   * Note that we rely on the external caller to properly abort() or commit() 
-   * every transaction.  If the caller is a network client, there should be a 
-   * lease-system in place that automatically aborts() transactions after a 
-   * specified quiet period.
+   * <p>Note that we rely on the external caller to properly abort() or
+   * commit() every transaction.  If the caller is a network client, there
+   * should be a lease-system in place that automatically aborts() transactions
+   * after a specified quiet period.
+   * 
+   * @param row Row to update
+   * @return lockid
+   * @see #put(long, Text, BytesWritable)
    */
   public long startUpdate(Text row) throws IOException {
-
-    // We obtain a per-row lock, so other clients will
-    // block while one client performs an update.
-
-    lock.obtainReadLock();
-    try {
-      return obtainLock(row);
-    } finally {
-      lock.releaseReadLock();
-    }
+    // We obtain a per-row lock, so other clients will block while one client
+    // performs an update.  The read lock is released by the client calling
+    // #commit or #abort or if the HRegionServer lease on the lock expires.
+    // See HRegionServer#RegionListener for how the expire on HRegionServer
+    // invokes a HRegion#abort.
+    return obtainRowLock(row);
   }
 
   /**
@@ -1099,10 +1085,11 @@
    * This method really just tests the input, then calls an internal localput() 
    * method.
    */
-  public void put(long lockid, Text targetCol, BytesWritable val) throws IOException {
+  public void put(long lockid, Text targetCol, BytesWritable val)
+  throws IOException {
     if(val.getSize() == DELETE_BYTES.getSize()
         && val.compareTo(DELETE_BYTES) == 0) {
-        throw new IOException("Cannot insert value: " + val);
+      throw new IOException("Cannot insert value: " + val);
     }
     localput(lockid, targetCol, val);
   }
@@ -1114,14 +1101,21 @@
     localput(lockid, targetCol, DELETE_BYTES);
   }
 
-  /**
+  /*
    * Private implementation.
    * 
-   * localput() is used for both puts and deletes. We just place the values into
-   * a per-row pending area, until a commit() or abort() call is received.
+   * localput() is used for both puts and deletes. We just place the values
+   * into a per-row pending area, until a commit() or abort() call is received.
    * (Or until the user's write-lock expires.)
+   * 
+   * @param lockid
+   * @param targetCol
+   * @param val Value to enter into cell
+   * @throws IOException
    */
-  void localput(long lockid, Text targetCol, BytesWritable val) throws IOException {
+  void localput(final long lockid, final Text targetCol,
+    final BytesWritable val)
+  throws IOException {
     checkColumn(targetCol);
     
     Text row = getRowFromLock(lockid);
@@ -1132,15 +1126,12 @@
     // This sync block makes localput() thread-safe when multiple
     // threads from the same client attempt an insert on the same 
     // locked row (via lockid).
-
     synchronized(row) {
-      
       // This check makes sure that another thread from the client
       // hasn't aborted/committed the write-operation.
-
       if(row != getRowFromLock(lockid)) {
-        throw new LockException("Locking error: put operation on lock " + lockid 
-            + " unexpected aborted by another thread");
+        throw new LockException("Locking error: put operation on lock " +
+            lockid + " unexpected aborted by another thread");
       }
       
       TreeMap<Text, BytesWritable> targets = targetColumns.get(lockid);
@@ -1178,22 +1169,22 @@
       }
       
       targetColumns.remove(lockid);
-      releaseLock(row);
+      releaseRowLock(row);
     }
   }
 
   /**
-   * Commit a pending set of writes to the memcache. This also results in writing
-   * to the change log.
+   * Commit a pending set of writes to the memcache. This also results in
+   * writing to the change log.
    *
    * Once updates hit the change log, they are safe.  They will either be moved 
    * into an HStore in the future, or they will be recovered from the log.
+   * @param lockid Lock for row we're to commit.
+   * @throws IOException
    */
-  public void commit(long lockid) throws IOException {
-    
+  public void commit(final long lockid) throws IOException {
     // Remove the row from the pendingWrites list so 
     // that repeated executions won't screw this up.
-    
     Text row = getRowFromLock(lockid);
     if(row == null) {
       throw new LockException("No write lock for lockid " + lockid);
@@ -1201,22 +1192,16 @@
     
     // This check makes sure that another thread from the client
     // hasn't aborted/committed the write-operation
-
     synchronized(row) {
-      
-      // We can now commit the changes.
       // Add updates to the log and add values to the memcache.
-
       long commitTimestamp = System.currentTimeMillis();
       log.append(regionInfo.regionName, regionInfo.tableDesc.getName(), row, 
-          targetColumns.get(lockid), commitTimestamp);
-      
-      memcache.add(row, targetColumns.get(lockid), commitTimestamp);
-
+        targetColumns.get(Long.valueOf(lockid)), commitTimestamp);
+      memcache.add(row, targetColumns.get(Long.valueOf(lockid)),
+        commitTimestamp);
       // OK, all done!
-
-      targetColumns.remove(lockid);
-      releaseLock(row);
+      targetColumns.remove(Long.valueOf(lockid));
+      releaseRowLock(row);
     }
     recentCommits++;
     this.commitsSinceFlush++;
@@ -1235,9 +1220,10 @@
       // all's well
       
     } else {
-      throw new IOException("Requested row out of range for HRegion "
-          + regionInfo.regionName + ", startKey='" + regionInfo.startKey
-          + "', endKey='" + regionInfo.endKey + "', row='" + row + "'");
+      throw new WrongRegionException("Requested row out of range for " +
+        "HRegion " + regionInfo.regionName + ", startKey='" +
+        regionInfo.startKey + "', endKey='" + regionInfo.endKey + "', row='" +
+        row + "'");
     }
   }
   
@@ -1255,25 +1241,32 @@
    * Obtain a lock on the given row.  Blocks until success.
    *
    * I know it's strange to have two mappings:
+   * <pre>
    *   ROWS  ==> LOCKS
+   * </pre>
    * as well as
+   * <pre>
    *   LOCKS ==> ROWS
+   * </pre>
    *
-   * But it acts as a guard on the client; a miswritten client just can't submit
-   * the name of a row and start writing to it; it must know the correct lockid,
-   * which matches the lock list in memory.
+   * But it acts as a guard on the client; a miswritten client just can't
+   * submit the name of a row and start writing to it; it must know the correct
+   * lockid, which matches the lock list in memory.
    * 
-   * It would be more memory-efficient to assume a correctly-written client, 
+   * <p>It would be more memory-efficient to assume a correctly-written client, 
    * which maybe we'll do in the future.
+   * 
+   * @param row Name of row to lock.
+   * @return The id of the held lock.
    */
-  long obtainLock(Text row) throws IOException {
+  long obtainRowLock(Text row) throws IOException {
     checkRow(row);
-    
     synchronized(rowsToLocks) {
       while(rowsToLocks.get(row) != null) {
         try {
           rowsToLocks.wait();
         } catch (InterruptedException ie) {
+          // Empty
         }
       }
       
@@ -1285,7 +1278,7 @@
     }
   }
   
-  Text getRowFromLock(long lockid) throws IOException {
+  Text getRowFromLock(long lockid) {
     // Pattern is that all access to rowsToLocks and/or to
     // locksToRows is via a lock on rowsToLocks.
     synchronized(rowsToLocks) {
@@ -1293,17 +1286,32 @@
     }
   }
   
-  /** Release the row lock! */
-  void releaseLock(Text row) throws IOException {
+  /** Release the row lock!
+   * @param lock Name of row whose lock we are to release
+   */
+  void releaseRowLock(Text row) {
     synchronized(rowsToLocks) {
       long lockid = rowsToLocks.remove(row).longValue();
       locksToRows.remove(lockid);
       rowsToLocks.notifyAll();
     }
   }
-  /*******************************************************************************
+  
+  private void waitOnRowLocks() {
+    synchronized (this.rowsToLocks) {
+      while (this.rowsToLocks.size() > 0) {
+        try {
+          this.rowsToLocks.wait();
+        } catch (InterruptedException e) {
+          // Catch. Let while test determine loop-end.
+        }
+      }
+    }
+  }
+  
+  /*
    * HScanner is an iterator through a bunch of rows in an HRegion.
-   ******************************************************************************/
+   */
   private static class HScanner implements HInternalScannerInterface {
     private HInternalScannerInterface[] scanners;
     private TreeMap<Text, BytesWritable>[] resultSets;
@@ -1314,10 +1322,8 @@
     /** Create an HScanner with a handle on many HStores. */
     @SuppressWarnings("unchecked")
     public HScanner(Text[] cols, Text firstRow, HMemcache memcache, HStore[] stores)
-        throws IOException {
-      
+    throws IOException {  
       long scanTime = System.currentTimeMillis();
-      
       this.scanners = new HInternalScannerInterface[stores.length + 1];
       for(int i = 0; i < this.scanners.length; i++) {
         this.scanners[i] = null;
@@ -1332,12 +1338,9 @@
       // All results will match the required column-set and scanTime.
       
       // NOTE: the memcache scanner should be the first scanner
-
       try {
         HInternalScannerInterface scanner =
           memcache.getScanner(scanTime, cols, firstRow);
-
-
         if(scanner.isWildcardScanner()) {
           this.wildcardMatch = true;
         }
@@ -1368,8 +1371,7 @@
       for(int i = 0; i < scanners.length; i++) {
         keys[i] = new HStoreKey();
         resultSets[i] = new TreeMap<Text, BytesWritable>();
-
-        if(scanners[i] != null && ! scanners[i].next(keys[i], resultSets[i])) {
+        if(scanners[i] != null && !scanners[i].next(keys[i], resultSets[i])) {
           closeScanner(i);
         }
       }
@@ -1393,13 +1395,12 @@
      * Grab the next row's worth of values.  The HScanner will return the most 
      * recent data value for each row that is not newer than the target time.
      */
-    public boolean next(HStoreKey key, TreeMap<Text, BytesWritable> results) throws IOException {
-      
+    public boolean next(HStoreKey key, TreeMap<Text, BytesWritable> results)
+    throws IOException {
       // Find the lowest-possible key.
-      
       Text chosenRow = null;
       long chosenTimestamp = -1;
-      for(int i = 0; i < keys.length; i++) {
+      for(int i = 0; i < this.keys.length; i++) {
         if(scanners[i] != null
             && (chosenRow == null
                 || (keys[i].getRow().compareTo(chosenRow) < 0)
@@ -1412,18 +1413,15 @@
       }
 
       // Store the key and results for each sub-scanner. Merge them as appropriate.
-      
       boolean insertedItem = false;
       if(chosenTimestamp > 0) {
         key.setRow(chosenRow);
         key.setVersion(chosenTimestamp);
         key.setColumn(new Text(""));
 
-        for(int i = 0; i < scanners.length; i++) {        
+        for(int i = 0; i < scanners.length; i++) {
           while((scanners[i] != null)
               && (keys[i].getRow().compareTo(chosenRow) == 0)) {
-            
-
             // If we are doing a wild card match or there are multiple matchers
             // per column, we need to scan all the older versions of this row
             // to pick up the rest of the family members
@@ -1439,11 +1437,7 @@
             //       values with older ones. So now we only insert
             //       a result if the map does not contain the key.
             
-            for(Iterator<Map.Entry<Text, BytesWritable>> it
-                = resultSets[i].entrySet().iterator();
-                it.hasNext(); ) {
-              
-              Map.Entry<Text, BytesWritable> e = it.next();
+            for(Map.Entry<Text, BytesWritable> e: resultSets[i].entrySet()) {
               if(!results.containsKey(e.getKey())) {
                 results.put(e.getKey(), e.getValue());
                 insertedItem = true;
@@ -1476,7 +1470,6 @@
     void closeScanner(int i) {
       try {
         scanners[i].close();
-        
       } finally {
         scanners[i] = null;
         keys[i] = null;
@@ -1492,5 +1485,161 @@
         }
       }
     }
+  }
+  
+  // Utility methods
+  
+  /**
+   * Convenience method creating new HRegions.
+   * @param regionId ID to use
+   * @param tableDesc Descriptor
+   * @param dir Home directory for the new region.
+   * @param conf
+   * @return New META region (ROOT or META).
+   * @throws IOException
+   */
+  public static HRegion createHRegion(final long regionId,
+    final HTableDescriptor tableDesc, final Path dir, final Configuration conf)
+  throws IOException {
+    return createHRegion(new HRegionInfo(regionId, tableDesc, null, null),
+      dir, conf, null, null);
+  }
+  
+  /**
+   * Convenience method creating new HRegions. Used by createTable and by the
+   * bootstrap code in the HMaster constructor
+   * 
+   * @param info Info for region to create.
+   * @param dir Home dir for new region
+   * @param conf
+   * @param initialFiles InitialFiles to pass new HRegion. Pass null if none.
+   * @param oldLogFile Old log file to use in region initialization.  Pass null
+   * if none. 
+   * @return new HRegion
+   * 
+   * @throws IOException
+   */
+  public static HRegion createHRegion(final HRegionInfo info,
+    final Path dir, final Configuration conf, final Path initialFiles,
+    final Path oldLogFile) 
+  throws IOException {
+    Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
+    FileSystem fs = FileSystem.get(conf);
+    fs.mkdirs(regionDir);
+    return new HRegion(dir,
+      new HLog(fs, new Path(regionDir, HREGION_LOGDIR_NAME), conf),
+      fs, conf, info, initialFiles, oldLogFile);
+  }
+  
+  /**
+   * Inserts a new region's meta information into the passed
+   * <code>meta</code> region. Used by the HMaster bootstrap code adding
+   * new table to ROOT table.
+   * 
+   * @param meta META HRegion to be updated
+   * @param r HRegion to add to <code>meta</code>
+   *
+   * @throws IOException
+   */
+  public static void addRegionToMETA(HRegion meta, HRegion r)
+  throws IOException {  
+    // The row key is the region name
+    long writeid = meta.startUpdate(r.getRegionName());
+    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    DataOutputStream s = new DataOutputStream(bytes);
+    r.getRegionInfo().write(s);
+    meta.put(writeid, COL_REGIONINFO, new BytesWritable(bytes.toByteArray()));
+    meta.commit(writeid);
+  }
+  
+  public static void addRegionToMETA(final HClient client,
+      final Text table, final HRegion region,
+      final HServerAddress serverAddress,
+      final long startCode)
+  throws IOException {
+    client.openTable(table);
+    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(bytes);
+    region.getRegionInfo().write(out);
+    long lockid = client.startUpdate(region.getRegionName());
+    client.put(lockid, COL_REGIONINFO, bytes.toByteArray());
+    client.put(lockid, COL_SERVER,
+      serverAddress.toString().getBytes(UTF8_ENCODING));
+    client.put(lockid, COL_STARTCODE,
+      String.valueOf(startCode).getBytes(UTF8_ENCODING));
+    client.commit(lockid);
+    LOG.info("Added region " + region.getRegionName() + " to table " + table);
+  }
+  
+  /**
+   * Delete <code>region</code> from META <code>table</code>.
+   * @param client Client to use running update.
+   * @param table META table we are to delete region from.
+   * @param regionName Region to remove.
+   * @throws IOException
+   */
+  public static void removeRegionFromMETA(final HClient client,
+      final Text table, final Text regionName)
+  throws IOException {
+    client.openTable(table);
+    long lockid = client.startUpdate(regionName);
+    client.delete(lockid, COL_REGIONINFO);
+    client.delete(lockid, COL_SERVER);
+    client.delete(lockid, COL_STARTCODE);
+    client.commit(lockid);
+    LOG.info("Removed " + regionName + " from table " + table);
+  }
+  
+  /**
+   * @param data Map of META row labelled column data.
+   * @return Server
+   */
+  static HRegionInfo getRegionInfo(final TreeMap<Text, byte[]> data)
+  throws IOException {
+    byte[] bytes = data.get(COL_REGIONINFO);
+    if (bytes == null || bytes.length == 0) {
+      throw new IOException("no value for " + COL_REGIONINFO);
+    }
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(bytes, bytes.length);
+    HRegionInfo info = new HRegionInfo();
+    info.readFields(in);
+    return info;
+  }
+  
+  /**
+   * @param data Map of META row labelled column data.
+   * @return Server
+   */
+  static String getServerName(final TreeMap<Text, byte[]> data) {
+    byte [] bytes = data.get(COL_SERVER);
+    String name = null;
+    try {
+      name = (bytes != null && bytes.length != 0) ?
+          new String(bytes, UTF8_ENCODING): null;
+
+    } catch(UnsupportedEncodingException e) {
+      assert(false);
+    }
+    return (name != null)? name.trim(): name;
+  }
+
+  /**
+   * @param data Map of META row labelled column data.
+   * @return Start code.
+   */
+  static long getStartCode(final TreeMap<Text, byte[]> data) {
+    long startCode = -1L;
+    byte [] bytes = data.get(COL_STARTCODE);
+    if(bytes != null && bytes.length != 0) {
+      try {
+        startCode = Long.parseLong(new String(bytes, UTF8_ENCODING).trim());
+      } catch(NumberFormatException e) {
+        LOG.error("Failed getting " + COL_STARTCODE, e);
+      } catch(UnsupportedEncodingException e) {
+        LOG.error("Failed getting " + COL_STARTCODE, e);
+      }
+    }
+    return startCode;
   }
 }
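
For context on the internalFlushcache changes above: the flushcache javadoc
describes a three-step protocol (flush the memcache to the on-disk stores while
noting the current log sequence id, write a FLUSHCACHE-COMPLETE marker to the
log under that same sequence id, then drop the now-redundant memcache
structures). A minimal sketch of that protocol follows; the Memcache,
WriteAheadLog and Store interfaces and their method names are stand-ins for
illustration, not the actual hbase APIs.

    import java.util.Map;

    interface Memcache { Map<String, byte[]> snapshot(); void deleteSnapshot(); }
    interface WriteAheadLog { long startCacheFlush(); void completeCacheFlush(long sequenceId); }
    interface Store { void flush(Map<String, byte[]> snapshot, long sequenceId); }

    final class FlushOutline {
      static void flushcache(Memcache memcache, WriteAheadLog log, Iterable<Store> stores) {
        // A. Flush the memcache to the on-disk stores, noting the sequence id
        //    that is current in the log at snapshot time.
        long sequenceId = log.startCacheFlush();
        Map<String, byte[]> snapshot = memcache.snapshot();
        for (Store store : stores) {
          store.flush(snapshot, sequenceId);
        }
        // B. Write a FLUSHCACHE-COMPLETE marker to the log using that same
        //    sequence id, so recovery knows which edits are already on disk.
        log.completeCacheFlush(sequenceId);
        // C. Drop the now-redundant memcache snapshot; its contents live in
        //    the on-disk stores.
        memcache.deleteSnapshot();
      }
    }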

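Similarly, the row-lock renames (obtainRowLock/releaseRowLock) and the new
waitOnRowLocks used by closeAndSplit implement the double mapping the
obtainRowLock javadoc describes (ROWS ==> LOCKS as well as LOCKS ==> ROWS, so a
client must hold the lockid it was handed). A simplified sketch, assuming
String row keys and a single monitor instead of the Text rows and
rowsToLocks-based synchronization in HRegion:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Random;

    class RowLocks {
      private final Map<String, Long> rowsToLocks = new HashMap<String, Long>();
      private final Map<Long, String> locksToRows = new HashMap<Long, String>();
      private final Random rand = new Random();

      // Block until no other client holds a lock on the row, then record the
      // row under a freshly generated lockid in both maps.
      synchronized long obtainRowLock(String row) throws InterruptedException {
        while (rowsToLocks.containsKey(row)) {
          wait();                               // woken by releaseRowLock()
        }
        long lockid = Math.abs(rand.nextLong());
        rowsToLocks.put(row, lockid);
        locksToRows.put(lockid, row);
        return lockid;
      }

      // Callers must present the lockid they were handed; a miswritten client
      // cannot start writing to a row it never locked.
      synchronized String getRowFromLock(long lockid) {
        return locksToRows.get(lockid);
      }

      synchronized void releaseRowLock(String row) {
        Long lockid = rowsToLocks.remove(row);
        if (lockid != null) {
          locksToRows.remove(lockid);
        }
        notifyAll();                            // wakes obtainRowLock()/waitOnRowLocks()
      }

      // Used before closing a region for a split: wait for every outstanding
      // row update to commit or abort.
      synchronized void waitOnRowLocks() throws InterruptedException {
        while (!rowsToLocks.isEmpty()) {
          wait();
        }
      }
    }
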
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java?view=diff&rev=544188&r1=544187&r2=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java Mon Jun  4 10:14:10 2007
@@ -20,9 +20,11 @@
 import java.io.DataInputStream;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.Map;
 
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparable;
 
 /**
  * HRegion information.
@@ -124,13 +126,27 @@
     this.regionName.readFields(in);
     this.offLine = in.readBoolean();
   }
-  
+
   //////////////////////////////////////////////////////////////////////////////
   // Comparable
   //////////////////////////////////////////////////////////////////////////////
   
   public int compareTo(Object o) {
-    HRegionInfo other = (HRegionInfo)o;
-    return regionName.compareTo(other.regionName);
+    HRegionInfo other = (HRegionInfo) o;
+    
+    // Are regions of same table?
+    int result = this.tableDesc.compareTo(other.tableDesc);
+    if (result != 0) {
+      return result;
+    }
+
+    // Compare start keys.
+    result = this.startKey.compareTo(other.startKey);
+    if (result != 0) {
+      return result;
+    }
+    
+    // Compare end keys.
+    return this.endKey.compareTo(other.endKey);
   }
-}
\ No newline at end of file
+}

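The new HRegionInfo.compareTo orders regions by table descriptor, then start
key, then end key, instead of by region name alone. A throwaway example of the
resulting order, using (table, startKey, endKey) string triples as simplified
stand-ins for HRegionInfo:

    import java.util.TreeSet;

    public class RegionOrderDemo {
      public static void main(String[] args) {
        // Comparator mirrors the new compareTo: table, then start key, then
        // end key.
        TreeSet<String[]> regions = new TreeSet<String[]>((a, b) -> {
          int c = a[0].compareTo(b[0]);
          if (c != 0) return c;
          c = a[1].compareTo(b[1]);
          if (c != 0) return c;
          return a[2].compareTo(b[2]);
        });
        regions.add(new String[] {"table1", "m", ""});   // [m, end)
        regions.add(new String[] {"table1", "", "m"});   // [start, m)
        regions.add(new String[] {"table0", "", ""});    // whole table
        for (String[] r : regions) {
          System.out.println(r[0] + " ['" + r[1] + "','" + r[2] + "')");
        }
        // Prints table0 first; within table1 the region starting at '' comes
        // before the one starting at 'm'.
      }
    }
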
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java?view=diff&rev=544188&r1=544187&r2=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java Mon Jun  4 10:14:10 2007
@@ -15,17 +15,28 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.Vector;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.*;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.ipc.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.conf.*;
-
-import java.io.*;
-import java.util.*;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /*******************************************************************************
  * HRegionServer makes a set of HRegions available to clients.  It checks in with
@@ -34,196 +45,194 @@
 public class HRegionServer
     implements HConstants, HRegionInterface, Runnable {
   
-  public long getProtocolVersion(String protocol, 
-      long clientVersion) throws IOException { 
+  public long getProtocolVersion(final String protocol, 
+      @SuppressWarnings("unused") final long clientVersion)
+  throws IOException { 
     if (protocol.equals(HRegionInterface.class.getName())) {
       return HRegionInterface.versionID;
-    } else {
-      throw new IOException("Unknown protocol to name node: " + protocol);
     }
+    throw new IOException("Unknown protocol to name node: " + protocol);
   }
 
-  private static final Log LOG = LogFactory.getLog(HRegionServer.class);
+  static final Log LOG = LogFactory.getLog(HRegionServer.class);
   
-  private volatile boolean stopRequested;
+  volatile boolean stopRequested;
   private Path regionDir;
-  private HServerInfo info;
-  private Configuration conf;
+  HServerInfo info;
+  Configuration conf;
   private Random rand;
-  private TreeMap<Text, HRegion> regions;               // region name -> HRegion
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+  
+  // region name -> HRegion
+  TreeMap<Text, HRegion> onlineRegions = new TreeMap<Text, HRegion>();
+  Map<Text, HRegion> retiringRegions = new HashMap<Text, HRegion>();
+  
+  final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private Vector<HMsg> outboundMsgs;
 
-  private long threadWakeFrequency;
-  private int maxLogEntries;
+  long threadWakeFrequency;
   private long msgInterval;
-  private int numRetries;
   
   // Check to see if regions should be split
-  
-  private long splitOrCompactCheckFrequency;
+  long splitOrCompactCheckFrequency;
   private SplitOrCompactChecker splitOrCompactChecker;
   private Thread splitOrCompactCheckerThread;
-  private Integer splitOrCompactLock = 0;
-  
-  private class SplitOrCompactChecker implements Runnable, RegionUnavailableListener {
-    private HClient client = new HClient(conf);
+  Integer splitOrCompactLock = Integer.valueOf(0);
   
-    /* (non-Javadoc)
-     * @see org.apache.hadoop.hbase.RegionUnavailableListener#regionIsUnavailable(org.apache.hadoop.io.Text)
+  /*
+   * Interface used by the {@link org.apache.hadoop.io.retry} mechanism.
+   */
+  interface UpdateMetaInterface {
+    /*
+     * @return True if succeeded.
+     * @throws IOException
      */
-    public void regionIsUnavailable(Text regionName) {
+   boolean update() throws IOException;
+  }
+  
+  class SplitOrCompactChecker implements Runnable, RegionUnavailableListener {
+    HClient client = new HClient(conf);
+  
+    public void closing(final Text regionName) {
       lock.writeLock().lock();
       try {
-        regions.remove(regionName);
+        // Remove region from regions Map and add it to the Map of retiring
+        // regions.
+        retiringRegions.put(regionName, onlineRegions.remove(regionName));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Adding " + regionName + " to retiringRegions");
+        }
+      } finally {
+        lock.writeLock().unlock();
+      }
+    }
+    
+    public void closed(final Text regionName) {
+      lock.writeLock().lock();
+      try {
+        retiringRegions.remove(regionName);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Removing " + regionName + " from retiringRegions");
+        }
       } finally {
         lock.writeLock().unlock();
       }
     }
 
-    /* (non-Javadoc)
-     * @see java.lang.Runnable#run()
-     */
     public void run() {
       while(! stopRequested) {
         long startTime = System.currentTimeMillis();
-
         synchronized(splitOrCompactLock) { // Don't interrupt us while we're working
-
           // Grab a list of regions to check
-
           Vector<HRegion> regionsToCheck = new Vector<HRegion>();
           lock.readLock().lock();
           try {
-            regionsToCheck.addAll(regions.values());
+            regionsToCheck.addAll(onlineRegions.values());
           } finally {
             lock.readLock().unlock();
           }
 
           try {
-            for(Iterator<HRegion>it = regionsToCheck.iterator(); it.hasNext(); ) {
-              HRegion cur = it.next();
-
+            for(HRegion cur: regionsToCheck) {
               if(cur.isClosed()) {
                 continue;                               // Skip if closed
               }
 
               if(cur.needsCompaction()) {
-
-                // The best time to split a region is right after it has been compacted
-
+                // Best time to split a region is right after compaction
                 if(cur.compactStores()) {
                   Text midKey = new Text();
                   if(cur.needsSplit(midKey)) {
-                    Text oldRegion = cur.getRegionName();
-
-                    LOG.info("splitting region: " + oldRegion);
-
-                    HRegion[] newRegions = cur.closeAndSplit(midKey, this);
-
-                    // When a region is split, the META table needs to updated if we're
-                    // splitting a 'normal' region, and the ROOT table needs to be
-                    // updated if we are splitting a META region.
-
-                    if(LOG.isDebugEnabled()) {
-                      LOG.debug("region split complete. updating meta");
-                    }
-
-                    Text tableToUpdate =
-                      (oldRegion.find(META_TABLE_NAME.toString()) == 0) ?
-                          ROOT_TABLE_NAME : META_TABLE_NAME;
-
-                    for(int tries = 0; tries < numRetries; tries++) {
-                      try {
-                        client.openTable(tableToUpdate);
-                        long lockid = client.startUpdate(oldRegion);
-                        client.delete(lockid, COL_REGIONINFO);
-                        client.delete(lockid, COL_SERVER);
-                        client.delete(lockid, COL_STARTCODE);
-                        client.commit(lockid);
-
-                        for(int i = 0; i < newRegions.length; i++) {
-                          ByteArrayOutputStream bytes = new ByteArrayOutputStream();
-                          DataOutputStream out = new DataOutputStream(bytes);
-                          newRegions[i].getRegionInfo().write(out);
-
-                          lockid = client.startUpdate(newRegions[i].getRegionName());
-                          client.put(lockid, COL_REGIONINFO, bytes.toByteArray());
-                          client.put(lockid, COL_SERVER, 
-                              info.getServerAddress().toString().getBytes(UTF8_ENCODING));
-                          client.put(lockid, COL_STARTCODE, 
-                              String.valueOf(info.getStartCode()).getBytes(UTF8_ENCODING));
-                          client.commit(lockid);
-                        }
-
-                        // Now tell the master about the new regions
-
-                        if(LOG.isDebugEnabled()) {
-                          LOG.debug("reporting region split to master");
-                        }
-
-                        reportSplit(newRegions[0].getRegionInfo(), newRegions[1].getRegionInfo());
-
-                        LOG.info("region split successful. old region=" + oldRegion
-                            + ", new regions: " + newRegions[0].getRegionName() + ", "
-                            + newRegions[1].getRegionName());
-
-                        // Finally, start serving the new regions
-
-                        lock.writeLock().lock();
-                        try {
-                          regions.put(newRegions[0].getRegionName(), newRegions[0]);
-                          regions.put(newRegions[1].getRegionName(), newRegions[1]);
-                        } finally {
-                          lock.writeLock().unlock();
-                        }
-
-                      } catch(NotServingRegionException e) {
-                        if(tries == numRetries - 1) {
-                          throw e;
-                        }
-                        continue;
-                      }
-                      break;
-                    }
+                    split(cur, midKey);
                   }
                 }
               }
             }
           } catch(IOException e) {
             //TODO: What happens if this fails? Are we toast?
-            LOG.error(e);
+            LOG.error("What happens if this fails? Are we toast?", e);
           }
         }
+        
+        if (stopRequested) {
+          continue;
+        }
 
         // Sleep
-        long waitTime = stopRequested ? 0
-            : splitOrCompactCheckFrequency - (System.currentTimeMillis() - startTime);
+        long waitTime = splitOrCompactCheckFrequency -
+          (System.currentTimeMillis() - startTime);
         if (waitTime > 0) {
           try {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Sleep splitOrCompactChecker");
-            }
             Thread.sleep(waitTime);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Wake splitOrCompactChecker");
-            }
           } catch(InterruptedException iex) {
+            // continue
           }
         }
       }
+      LOG.info("splitOrCompactChecker exiting");
+    }
+    
+    private void split(final HRegion region, final Text midKey)
+    throws IOException {
+      final Text oldRegion = region.getRegionName();
+      final HRegion[] newRegions = region.closeAndSplit(midKey, this);
+
+      // When a region is split, the META table needs to updated if we're
+      // splitting a 'normal' region, and the ROOT table needs to be
+      // updated if we are splitting a META region.
+      final Text tableToUpdate =
+        (oldRegion.find(META_TABLE_NAME.toString()) == 0) ?
+            ROOT_TABLE_NAME : META_TABLE_NAME;
       if(LOG.isDebugEnabled()) {
-        LOG.debug("splitOrCompactChecker exiting");
+        LOG.debug("Updating " + tableToUpdate + " with region split info");
       }
+      
+      // Wrap the update of META region with an org.apache.hadoop.io.retry.
+      UpdateMetaInterface implementation = new UpdateMetaInterface() {
+        public boolean update() throws IOException {
+          HRegion.removeRegionFromMETA(client, tableToUpdate,
+            region.getRegionName());
+          for (int i = 0; i < newRegions.length; i++) {
+            HRegion.addRegionToMETA(client, tableToUpdate, newRegions[i],
+              info.getServerAddress(), info.getStartCode());
+          }
+          
+          // Now tell the master about the new regions
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Reporting region split to master");
+          }
+          reportSplit(newRegions[0].getRegionInfo(), newRegions[1].
+            getRegionInfo());
+          LOG.info("region split, META update, and report to master all" +
+            " successful. Old region=" + oldRegion + ", new regions: " +
+            newRegions[0].getRegionName() + ", " +
+            newRegions[1].getRegionName());
+
+          // Finally, start serving the new regions
+          lock.writeLock().lock();
+          try {
+            onlineRegions.put(newRegions[0].getRegionName(), newRegions[0]);
+            onlineRegions.put(newRegions[1].getRegionName(), newRegions[1]);
+          } finally {
+            lock.writeLock().unlock();
+          }
+          return true;
+        }
+      };
+      
+      // Get retry proxy wrapper around 'implementation'.
+      UpdateMetaInterface retryProxy = (UpdateMetaInterface)RetryProxy.
+        create(UpdateMetaInterface.class, implementation,
+        client.getRetryPolicy());
+      // Run retry.
+      retryProxy.update();
     }
   }
-  
-  // Cache flushing
-  
+
+  // Cache flushing  
   private Flusher cacheFlusher;
   private Thread cacheFlusherThread;
-  private Integer cacheFlusherLock = 0;
-  private class Flusher implements Runnable {
+  Integer cacheFlusherLock = Integer.valueOf(0);
+  class Flusher implements Runnable {
     public void run() {
       while(! stopRequested) {
         long startTime = System.currentTimeMillis();
@@ -235,23 +244,20 @@
           Vector<HRegion> toFlush = new Vector<HRegion>();
           lock.readLock().lock();
           try {
-            toFlush.addAll(regions.values());
+            toFlush.addAll(onlineRegions.values());
           } finally {
             lock.readLock().unlock();
           }
 
           // Flush them, if necessary
 
-          for(Iterator<HRegion> it = toFlush.iterator(); it.hasNext(); ) {
-            HRegion cur = it.next();
-            
+          for(HRegion cur: toFlush) {
             if(cur.isClosed()) {                // Skip if closed
               continue;
             }
 
             try {
               cur.optionallyFlush();
-
             } catch(IOException iex) {
               LOG.error(iex);
             }
@@ -263,21 +269,14 @@
             : threadWakeFrequency - (System.currentTimeMillis() - startTime);
         
         if(waitTime > 0) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Sleep cacheFlusher");
-          }
           try {
             Thread.sleep(waitTime);
           } catch(InterruptedException iex) {
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Wake cacheFlusher");
+            // continue
           }
         }
       }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("cacheFlusher exiting");
-      }
+      LOG.info("cacheFlusher exiting");
     }
   }
   
@@ -287,45 +286,45 @@
   private Path oldlogfile;
   
   // Logging
-  
-  private HLog log;
+  HLog log;
   private LogRoller logRoller;
   private Thread logRollerThread;
-  private Integer logRollerLock = 0;
-  private class LogRoller implements Runnable {
+  Integer logRollerLock = Integer.valueOf(0);
+  
+  /**
+   * Log rolling Runnable.
+   */
+  class LogRoller implements Runnable {
+    private int maxLogEntries =
+      conf.getInt("hbase.regionserver.maxlogentries", 30 * 1000);
+    
     public void run() {
       while(! stopRequested) {
         synchronized(logRollerLock) {
           // If the number of log entries is high enough, roll the log.  This is a
           // very fast operation, but should not be done too frequently.
           int nEntries = log.getNumEntries();
-          if(nEntries > maxLogEntries) {
+          if(nEntries > this.maxLogEntries) {
             try {
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Rolling log. Number of entries is: " + nEntries);
               }
               log.rollWriter();
             } catch(IOException iex) {
+              // continue
             }
           }
         }
         
         if(!stopRequested) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Sleep logRoller");
-          }
           try {
             Thread.sleep(threadWakeFrequency);
           } catch(InterruptedException iex) {
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Wake logRoller");
+            // continue
           }
         }
       }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("logRoller exiting");
-      }
+      LOG.info("logRoller exiting");
     }
   }
   
@@ -338,7 +337,6 @@
   private Server server;
   
   // Leases
-  
   private Leases leases;
 
   /** Start a HRegionServer at the default location */
@@ -357,18 +355,17 @@
     this.regionDir = regionDir;
     this.conf = conf;
     this.rand = new Random();
-    this.regions = new TreeMap<Text, HRegion>();
     this.outboundMsgs = new Vector<HMsg>();
     this.scanners =
       Collections.synchronizedMap(new TreeMap<Text, HInternalScannerInterface>());
 
     // Config'ed params
     this.threadWakeFrequency = conf.getLong(THREAD_WAKE_FREQUENCY, 10 * 1000);
-    this.maxLogEntries = conf.getInt("hbase.regionserver.maxlogentries", 30 * 1000);
     this.msgInterval = conf.getLong("hbase.regionserver.msginterval",
-        15 * 1000);
+      15 * 1000);
     this.splitOrCompactCheckFrequency =
-      conf.getLong("hbase.regionserver.thread.splitcompactcheckfrequency", 60 * 1000);
+      conf.getLong("hbase.regionserver.thread.splitcompactcheckfrequency",
+      60 * 1000);
     
     // Cache flushing
     this.cacheFlusher = new Flusher();
@@ -448,7 +445,7 @@
    * Set a flag that will cause all the HRegionServer threads to shut down
    * in an orderly fashion.
    */
-  public synchronized void stop() throws IOException {
+  public synchronized void stop() {
     stopRequested = true;
     notifyAll();                        // Wakes run() if it is sleeping
   }
@@ -460,24 +457,30 @@
     try {
       this.workerThread.join();
     } catch(InterruptedException iex) {
+      // continue
     }
     try {
       this.logRollerThread.join();
     } catch(InterruptedException iex) {
+      // continue
     }
     try {
       this.cacheFlusherThread.join();
     } catch(InterruptedException iex) {
+      // continue
     }
     try {
       this.splitOrCompactCheckerThread.join();
     } catch(InterruptedException iex) {
+      // continue
     }
     try {
       this.server.join();
     } catch(InterruptedException iex) {
+      // continue
     }
-    LOG.info("HRegionServer stopped at: " + info.getServerAddress().toString());
+    LOG.info("HRegionServer stopped at: " +
+      info.getServerAddress().toString());
   }
   
   /**
@@ -506,9 +509,6 @@
             : msgInterval - (System.currentTimeMillis() - lastMsg);
         
         if(waitTime > 0) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Sleep");
-          }
           synchronized (this) {
             try {
               wait(waitTime);
@@ -586,9 +586,6 @@
         waitTime = stopRequested ? 0
             : msgInterval - (System.currentTimeMillis() - lastMsg);
         if (waitTime > 0) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Sleep");
-          }
           synchronized (this) {
             try {
               wait(waitTime);
@@ -596,9 +593,6 @@
               // On interrupt we go around to the while test of stopRequested
             }
           }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Wake");
-          }
         }
       }
     }
@@ -666,7 +660,7 @@
    * updated the meta or root regions, and the master will pick that up on its
    * next rescan of the root or meta tables.
    */
-  private void reportSplit(HRegionInfo newRegionA, HRegionInfo newRegionB) {
+  void reportSplit(HRegionInfo newRegionA, HRegionInfo newRegionB) {
     synchronized(outboundMsgs) {
       outboundMsgs.add(new HMsg(HMsg.MSG_NEW_REGION, newRegionA));
       outboundMsgs.add(new HMsg(HMsg.MSG_NEW_REGION, newRegionB));
@@ -677,10 +671,10 @@
   // HMaster-given operations
   //////////////////////////////////////////////////////////////////////////////
 
-  private Vector<HMsg> toDo;
+  Vector<HMsg> toDo;
   private Worker worker;
   private Thread workerThread;
-  private class Worker implements Runnable {
+  class Worker implements Runnable {
     public void stop() {
       synchronized(toDo) {
         toDo.notifyAll();
@@ -700,6 +694,7 @@
                 LOG.debug("Wake on todo");
               }
             } catch(InterruptedException e) {
+              // continue
             }
           }
           if(stopRequested) {
@@ -761,38 +756,34 @@
           LOG.error(e);
         }
       }
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("worker thread exiting");
-      }
+      LOG.info("worker thread exiting");
     }
   }
   
-  private void openRegion(HRegionInfo regionInfo) throws IOException {
+  void openRegion(HRegionInfo regionInfo) throws IOException {
     this.lock.writeLock().lock();
     try {
       HRegion region =
         new HRegion(regionDir, log, fs, conf, regionInfo, null, oldlogfile);
-      regions.put(region.getRegionName(), region);
+      this.onlineRegions.put(region.getRegionName(), region);
       reportOpen(region); 
     } finally {
       this.lock.writeLock().unlock();
     }
   }
 
-  private void closeRegion(HRegionInfo info, boolean reportWhenCompleted)
-      throws IOException {
-    
+  void closeRegion(final HRegionInfo hri, final boolean reportWhenCompleted)
+  throws IOException {  
     this.lock.writeLock().lock();
     HRegion region = null;
     try {
-      region = regions.remove(info.regionName);
+      region = onlineRegions.remove(hri.regionName);
     } finally {
       this.lock.writeLock().unlock();
     }
       
     if(region != null) {
       region.close();
-
       if(reportWhenCompleted) {
         reportClose(region);
       }
@@ -800,12 +791,12 @@
   }
 
   /** Called either when the master tells us to restart or from stop() */
-  private void closeAllRegions() {
+  void closeAllRegions() {
     Vector<HRegion> regionsToClose = new Vector<HRegion>();
     this.lock.writeLock().lock();
     try {
-      regionsToClose.addAll(regions.values());
-      regions.clear();
+      regionsToClose.addAll(onlineRegions.values());
+      onlineRegions.clear();
     } finally {
       this.lock.writeLock().unlock();
     }
@@ -817,7 +808,6 @@
       try {
         region.close();
         LOG.debug("region closed " + region.getRegionName());
-        
       } catch(IOException e) {
         LOG.error("error closing region " + region.getRegionName(), e);
       }
@@ -829,55 +819,34 @@
   //////////////////////////////////////////////////////////////////////////////
 
   /** Obtain a table descriptor for the given region */
-  public HRegionInfo getRegionInfo(Text regionName) throws NotServingRegionException {
-    HRegion region = getRegion(regionName);
-    return region.getRegionInfo();
+  public HRegionInfo getRegionInfo(Text regionName)
+  throws NotServingRegionException {
+    return getRegion(regionName).getRegionInfo();
   }
 
   /** Get the indicated row/column */
-  public BytesWritable get(Text regionName, Text row, Text column) throws IOException {
-    HRegion region = getRegion(regionName);
-    
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("get " + row.toString() + ", " + column.toString());
-    }
-    BytesWritable results = region.get(row, column);
-    if(results != null) {
-      return results;
-    }
-    return null;
+  public BytesWritable get(Text regionName, Text row, Text column)
+  throws IOException {
+    return getRegion(regionName).get(row, column);
   }
 
   /** Get multiple versions of the indicated row/col */
   public BytesWritable[] get(Text regionName, Text row, Text column, 
-      int numVersions) throws IOException {
-    
-    HRegion region = getRegion(regionName);
-    
-    BytesWritable[] results = region.get(row, column, numVersions);
-    if(results != null) {
-      return results;
-    }
-    return null;
+      int numVersions)
+  throws IOException {
+    return getRegion(regionName).get(row, column, numVersions);
   }
 
   /** Get multiple timestamped versions of the indicated row/col */
   public BytesWritable[] get(Text regionName, Text row, Text column, 
-      long timestamp, int numVersions) throws IOException {
-    
-    HRegion region = getRegion(regionName);
-    
-    BytesWritable[] results = region.get(row, column, timestamp, numVersions);
-    if(results != null) {
-      return results;
-    }
-    return null;
+      long timestamp, int numVersions)
+  throws IOException {
+    return getRegion(regionName).get(row, column, timestamp, numVersions);
   }
 
   /** Get all the columns (along with their names) for a given row. */
   public LabelledData[] getRow(Text regionName, Text row) throws IOException {
     HRegion region = getRegion(regionName);
-    
     TreeMap<Text, BytesWritable> map = region.getFull(row);
     LabelledData result[] = new LabelledData[map.size()];
     int counter = 0;
@@ -910,13 +879,44 @@
     }
   }
   
+  public LabelledData[] next(final long scannerId, final HStoreKey key)
+  throws IOException {
+    Text scannerName = new Text(String.valueOf(scannerId));
+    HInternalScannerInterface s = scanners.get(scannerName);
+    if (s == null) {
+      throw new UnknownScannerException("Name: " + scannerName + ", key " +
+        key);
+    }
+    leases.renewLease(scannerName, scannerName);
+    TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
+    ArrayList<LabelledData> values = new ArrayList<LabelledData>();
+    // Keep getting rows till we find one that has at least one non-deleted
+    // column value.
+    while (s.next(key, results)) {
+      for(Map.Entry<Text, BytesWritable> e: results.entrySet()) {
+        BytesWritable val = e.getValue();
+        if(val.getSize() == DELETE_BYTES.getSize()
+            && val.compareTo(DELETE_BYTES) == 0) {
+          // Column value is deleted. Don't return it.
+          continue;
+        }
+        values.add(new LabelledData(e.getKey(), val));
+      }
+      if (values.size() > 0) {
+        // Row has something in it. Let it out. Else go get another row.
+        break;
+      }
+      // Need to clear results before we go back up and call 'next' again.
+      results.clear();
+    }
+    return values.toArray(new LabelledData[values.size()]);
+  }
+
   public long startUpdate(Text regionName, long clientid, Text row) 
       throws IOException {
-    
     HRegion region = getRegion(regionName);
-    
     long lockid = region.startUpdate(row);
-    leases.createLease(new Text(String.valueOf(clientid)), 
+    this.leases.createLease(new Text(String.valueOf(clientid)), 
         new Text(String.valueOf(lockid)), 
         new RegionListener(region, lockid));
     
@@ -926,48 +926,36 @@
   /** Add something to the HBase. */
   public void put(Text regionName, long clientid, long lockid, Text column, 
       BytesWritable val) throws IOException {
-    
-    HRegion region = getRegion(regionName);
-    
+    HRegion region = getRegion(regionName, true);
     leases.renewLease(new Text(String.valueOf(clientid)), 
         new Text(String.valueOf(lockid)));
-    
     region.put(lockid, column, val);
   }
 
   /** Remove a cell from the HBase. */
   public void delete(Text regionName, long clientid, long lockid, Text column) 
       throws IOException {
-    
     HRegion region = getRegion(regionName);
-    
     leases.renewLease(new Text(String.valueOf(clientid)), 
         new Text(String.valueOf(lockid)));
-    
     region.delete(lockid, column);
   }
 
   /** Abandon the transaction */
   public void abort(Text regionName, long clientid, long lockid) 
       throws IOException {
-    
-    HRegion region = getRegion(regionName);
-    
+    HRegion region = getRegion(regionName, true);
     leases.cancelLease(new Text(String.valueOf(clientid)), 
         new Text(String.valueOf(lockid)));
-    
     region.abort(lockid);
   }
 
   /** Confirm the transaction */
   public void commit(Text regionName, long clientid, long lockid) 
-      throws IOException {
-    
-    HRegion region = getRegion(regionName);
-    
+  throws IOException {
+    HRegion region = getRegion(regionName, true);
     leases.cancelLease(new Text(String.valueOf(clientid)), 
         new Text(String.valueOf(lockid)));
-    
     region.commit(lockid);
   }
 
@@ -977,27 +965,55 @@
         new Text(String.valueOf(lockid)));
   }
 
-  /** Private utility method for safely obtaining an HRegion handle. */
-  private HRegion getRegion(Text regionName) throws NotServingRegionException {
-    this.lock.readLock().lock();
+  /** Private utility method for safely obtaining an HRegion handle.
+   * @param regionName Name of online {@link HRegion} to return
+   * @return {@link HRegion} for <code>regionName</code>
+   * @throws NotServingRegionException
+   */
+  private HRegion getRegion(final Text regionName)
+  throws NotServingRegionException {
+    return getRegion(regionName, false);
+  }
+  
+  /** Private utility method for safely obtaining an HRegion handle.
+   * @param regionName Name of online {@link HRegion} to return
+   * @param checkRetiringRegions Set true if we're to check retiring regions
+   * as well as online regions.
+   * @return {@link HRegion} for <code>regionName</code>
+   * @throws NotServingRegionException
+   */
+  private HRegion getRegion(final Text regionName,
+      final boolean checkRetiringRegions)
+  throws NotServingRegionException {
     HRegion region = null;
+    this.lock.readLock().lock();
     try {
-      region = regions.get(regionName);
+      region = onlineRegions.get(regionName);
+      if (region == null && checkRetiringRegions) {
+        region = this.retiringRegions.get(regionName);
+        if (LOG.isDebugEnabled()) {
+          if (region != null) {
+            LOG.debug("Found region " + regionName + " in retiringRegions");
+          }
+        }
+      }
+
+      if (region == null) {
+        throw new NotServingRegionException(regionName.toString());
+      }
+      
+      return region;
     } finally {
       this.lock.readLock().unlock();
     }
-
-    if(region == null) {
-      throw new NotServingRegionException(regionName.toString());
-    }
-    return region;
   }
 
   //////////////////////////////////////////////////////////////////////////////
   // remote scanner interface
   //////////////////////////////////////////////////////////////////////////////
 
-  private Map<Text, HInternalScannerInterface> scanners;
+  Map<Text, HInternalScannerInterface> scanners;
+  
   private class ScannerListener extends LeaseListener {
     private Text scannerName;
     
@@ -1006,6 +1022,7 @@
     }
     
     public void leaseExpired() {
+      LOG.info("Scanner " + scannerName + " lease expired");
       HInternalScannerInterface s = null;
       synchronized(scanners) {
         s = scanners.remove(scannerName);
@@ -1018,8 +1035,7 @@
   
   /** Start a scanner for a given HRegion. */
   public long openScanner(Text regionName, Text[] cols, Text firstRow)
-      throws IOException {
-
+  throws IOException {
     HRegion r = getRegion(regionName);
     long scannerId = -1L;
     try {
@@ -1029,8 +1045,8 @@
       synchronized(scanners) {
         scanners.put(scannerName, s);
       }
-      leases.createLease(scannerName, scannerName, new ScannerListener(scannerName));
-    
+      leases.createLease(scannerName, scannerName,
+        new ScannerListener(scannerName));
     } catch(IOException e) {
       LOG.error(e);
       throw e;
@@ -1038,38 +1054,6 @@
     return scannerId;
   }
   
-  public LabelledData[] next(long scannerId, HStoreKey key) throws IOException {
-    
-    Text scannerName = new Text(String.valueOf(scannerId));
-    HInternalScannerInterface s = scanners.get(scannerName);
-    if(s == null) {
-      throw new IOException("unknown scanner");
-    }
-    leases.renewLease(scannerName, scannerName);
-    TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
-    ArrayList<LabelledData> values = new ArrayList<LabelledData>();
-    if(s.next(key, results)) {
-      for(Iterator<Map.Entry<Text, BytesWritable>> it
-          = results.entrySet().iterator();
-          it.hasNext(); ) {
-        
-        Map.Entry<Text, BytesWritable> e = it.next();
-        BytesWritable val = e.getValue();
-        if(val.getSize() == DELETE_BYTES.getSize()
-            && val.compareTo(DELETE_BYTES) == 0) {
-            
-          // Value is deleted. Don't return a value
-          
-          continue;
-
-        } else {
-          values.add(new LabelledData(e.getKey(), val));
-        }
-      }
-    }
-    return values.toArray(new LabelledData[values.size()]);
-  }
-  
   public void close(long scannerId) throws IOException {
     Text scannerName = new Text(String.valueOf(scannerId));
     HInternalScannerInterface s = null;
@@ -1077,7 +1061,7 @@
       s = scanners.remove(scannerName);
     }
     if(s == null) {
-      throw new IOException("unknown scanner");
+      throw new UnknownScannerException(scannerName.toString());
     }
     s.close();
     leases.cancelLease(scannerName, scannerName);
@@ -1096,7 +1080,7 @@
     System.exit(0);
   }
   
-  public static void main(String [] args) throws IOException {
+  public static void main(String [] args) {
     if (args.length < 1) {
       printUsageAndExit();
     }
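
The openScanner()/next()/close() methods above, each guarded by a per-scanner
lease, make up the remote scanning protocol. Below is a rough client-side sketch
of how they chain together; the ScannerCalls interface is only a stand-in for
whatever proxy actually exposes these methods, and the "empty array means the
scanner is exhausted" convention is inferred from the reworked next() above
rather than stated anywhere in this patch:

    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.HStoreKey;
    import org.apache.hadoop.hbase.LabelledData;

    public class ScanClientSketch {
      /** Stand-in for the proxy exposing the scanner calls; not a real HBase type. */
      interface ScannerCalls {
        long openScanner(Text regionName, Text[] cols, Text firstRow) throws IOException;
        LabelledData[] next(long scannerId, HStoreKey key) throws IOException;
        void close(long scannerId) throws IOException;
      }

      static void scanAll(ScannerCalls server, Text regionName, Text[] cols,
          Text firstRow) throws IOException {
        long scannerId = server.openScanner(regionName, cols, firstRow);
        try {
          HStoreKey key = new HStoreKey();
          while (true) {
            LabelledData[] values = server.next(scannerId, key);
            if (values.length == 0) {
              break;                      // nothing left: scanner is exhausted
            }
            for (int i = 0; i < values.length; i++) {
              // each entry is one non-deleted column value of the current row
            }
          }
        } finally {
          server.close(scannerId);        // also cancels the scanner's lease
        }
      }
    }
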

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java?view=diff&rev=544188&r1=544187&r2=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java Mon Jun  4 10:14:10 2007
@@ -369,8 +369,8 @@
    * Compact the back-HStores.  This method may take some time, so the calling 
    * thread must be able to block for long periods.
    * 
-   * During this time, the HStore can work as usual, getting values from MapFiles
-   * and writing new MapFiles from given memcaches.
+   * During this time, the HStore can work as usual, getting values from
+   * MapFiles and writing new MapFiles from given memcaches.
    * 
    * Existing MapFiles are not destroyed until the new compacted TreeMap is 
    * completely written-out to disk.
@@ -410,8 +410,7 @@
         // Compute the max-sequenceID seen in any of the to-be-compacted TreeMaps
 
         long maxSeenSeqID = -1;
-        for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
-          HStoreFile hsf = it.next();
+        for (HStoreFile hsf: toCompactFiles) {
           long seqid = hsf.loadInfo(fs);
           if(seqid > 0) {
             if(seqid > maxSeenSeqID) {
@@ -587,7 +586,6 @@
             HStoreFile hsf = it.next();
             hsf.write(out);
           }
-          
         } finally {
           out.close();
         }
@@ -595,12 +593,7 @@
         // Indicate that we're done.
 
         Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
-        out = new DataOutputStream(fs.create(doneFile));
-        
-        try {
-        } finally {
-          out.close();
-        }
+        (new DataOutputStream(fs.create(doneFile))).close();
 
         // Move the compaction into place.
 
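The one-liner above replaces an empty try/finally: the COMPACTION_DONE file is a
zero-length marker whose mere existence says the compaction output is complete.
A minimal sketch of that idiom against a Hadoop FileSystem (the directory and
marker names here are placeholders, not the store's real on-disk layout):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DoneMarkerSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path workDir = new Path("/tmp/compaction.work");   // placeholder working dir
        Path doneFile = new Path(workDir, "done");          // placeholder marker name
        fs.mkdirs(workDir);
        // ... write the compacted output under workDir ...
        fs.create(doneFile).close();   // zero-length file: "all output was written"
        // On restart, results under workDir are trusted only if the marker exists.
        if (fs.exists(doneFile)) {
          // safe to move the compacted files into place
        }
      }
    }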

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java?view=diff&rev=544188&r1=544187&r2=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java Mon Jun  4 10:14:10 2007
@@ -91,7 +91,9 @@
         Lease lease = new Lease(holderId, resourceId, listener);
         Text leaseId = lease.getLeaseId();
         if(leases.get(leaseId) != null) {
-          throw new IOException("Impossible state for createLease(): Lease for holderId " + holderId + " and resourceId " + resourceId + " is still held.");
+          throw new IOException("Impossible state for createLease(): Lease " +
+            "for holderId " + holderId + " and resourceId " + resourceId +
+            " is still held.");
         }
         leases.put(leaseId, lease);
         sortedLeases.add(lease);
@@ -106,11 +108,10 @@
         Text leaseId = createLeaseId(holderId, resourceId);
         Lease lease = leases.get(leaseId);
         if(lease == null) {
-          
           // It's possible that someone tries to renew the lease, but 
           // it just expired a moment ago.  So fail.
-          
-          throw new IOException("Cannot renew lease is not held (holderId=" + holderId + ", resourceId=" + resourceId + ")");
+          throw new IOException("Cannot renew lease; not held (holderId=" +
+            holderId + ", resourceId=" + resourceId + ")");
         }
         
         sortedLeases.remove(lease);
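
Taken together with the region server changes earlier in this commit, the lease
lifecycle is always the same three calls plus the expiry callback. A condensed
view, using the scanner case exactly as it appears above (scannerName is the
Text form of the scanner id; ScannerListener is the private listener in
HRegionServer, so this reads as code inside that class rather than a standalone
program):

    // On openScanner():
    leases.createLease(scannerName, scannerName, new ScannerListener(scannerName));
    // On every next() call for that scanner:
    leases.renewLease(scannerName, scannerName);
    // On a clean close():
    leases.cancelLease(scannerName, scannerName);
    // If the client simply goes away, the lease times out and
    // ScannerListener.leaseExpired() removes and closes the orphaned scanner.
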

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionNotFoundException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionNotFoundException.java?view=auto&rev=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionNotFoundException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionNotFoundException.java Mon Jun  4 10:14:10 2007
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class RegionNotFoundException extends IOException {
+  private static final long serialVersionUID = 993179627856392526L;
+
+  public RegionNotFoundException() {
+    super();
+  }
+
+  public RegionNotFoundException(String s) {
+    super(s);
+  }
+}
\ No newline at end of file

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionUnavailableListener.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionUnavailableListener.java?view=diff&rev=544188&r1=544187&r2=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionUnavailableListener.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/RegionUnavailableListener.java Mon Jun  4 10:14:10 2007
@@ -19,9 +19,22 @@
 
 /**
  * Used as a callback mechanism so that an HRegion can notify the HRegionServer
- * when a region is about to be closed during a split operation. This is done
- * to minimize the amount of time the region is off-line.
+ * of the different stages of making an HRegion unavailable.  Regions are made
+ * unavailable during region split operations.
  */
 public interface RegionUnavailableListener {
-  public void regionIsUnavailable(Text regionName);
+  /**
+   * <code>regionName</code> is closing.
+   * Listener should stop accepting new writes but can continue to service
+   * outstanding transactions.
+   * @param regionName
+   */
+  public void closing(final Text regionName);
+  
+  /**
+   * <code>regionName</code> is closed and no longer available.
+   * Listener should clean up any references to <code>regionName</code>
+   * @param regionName
+   */
+  public void closed(final Text regionName);
 }

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/UnknownScannerException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/UnknownScannerException.java?view=auto&rev=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/UnknownScannerException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/UnknownScannerException.java Mon Jun  4 10:14:10 2007
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class UnknownScannerException extends IOException {
+  private static final long serialVersionUID = 993179627856392526L;
+
+  public UnknownScannerException() {
+    super();
+  }
+
+  public UnknownScannerException(String s) {
+    super(s);
+  }
+}
\ No newline at end of file

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/WrongRegionException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/WrongRegionException.java?view=auto&rev=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/WrongRegionException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/WrongRegionException.java Mon Jun  4 10:14:10 2007
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class WrongRegionException extends IOException {
+  private static final long serialVersionUID = 993179627856392526L;
+
+  public WrongRegionException() {
+    super();
+  }
+
+  public WrongRegionException(String s) {
+    super(s);
+  }
+}
\ No newline at end of file

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java?view=diff&rev=544188&r1=544187&r2=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java Mon Jun  4 10:14:10 2007
@@ -42,7 +42,7 @@
   private static final int ROW_LENGTH = 1024;
   
 
-  private static final int ONE_HUNDRED_MB = 1024 * 1024 * 1 /*100 RESTORE*/;
+  private static final int ONE_HUNDRED_MB = 1024 * 1024 * 100;
   private static final int ROWS_PER_100_MB = ONE_HUNDRED_MB / ROW_LENGTH;
   
   private static final int ONE_GB = ONE_HUNDRED_MB * 10;
@@ -62,7 +62,7 @@
     RANDOM_WRITE,
     SEQUENTIAL_READ,
     SEQUENTIAL_WRITE,
-    SCAN};
+    SCAN}
   
   private Random rand;
   private Configuration conf;
@@ -177,8 +177,8 @@
           test == Test.SCAN || test == Test.SEQUENTIAL_READ ||
           test == Test.SEQUENTIAL_WRITE) {
 
-        for(int range = 0; range < 10; range++) {
-          long elapsedTime = sequentialWrite(range * nRows, nRows);
+        for(int i = 0; i < 10; i++) {
+          long elapsedTime = sequentialWrite(i * nRows, nRows);
           if (test == Test.SEQUENTIAL_WRITE) {
             totalElapsedTime += elapsedTime;
           }
@@ -188,8 +188,8 @@
       switch(test) {
       
       case RANDOM_READ:
-        for(int range = 0 ; range < 10; range++) {
-          long elapsedTime = randomRead(range * nRows, nRows);
+        for(int i = 0 ; i < 10; i++) {
+          long elapsedTime = randomRead(i * nRows, nRows);
           totalElapsedTime += elapsedTime;
         }
         System.out.print("Random read of " + R + " rows completed in: ");
@@ -199,15 +199,15 @@
         throw new UnsupportedOperationException("Not yet implemented");
 
       case RANDOM_WRITE:
-        for(int range = 0 ; range < 10; range++) {
-          long elapsedTime = randomWrite(range * nRows, nRows);
+        for(int i = 0 ; i < 10; i++) {
+          long elapsedTime = randomWrite(i * nRows, nRows);
           totalElapsedTime += elapsedTime;
         }
         System.out.print("Random write of " + R + " rows completed in: ");
         break;
 
       case SCAN:
-        for(int range = 0 ; range < 10; range++) {
-          long elapsedTime = scan(range * nRows, nRows);
+        for(int i = 0 ; i < 10; i++) {
+          long elapsedTime = scan(i * nRows, nRows);
           totalElapsedTime += elapsedTime;
         }
@@ -215,8 +215,8 @@
         break;
 
       case SEQUENTIAL_READ:
-        for(int range = 0 ; range < 10; range++) {
-          long elapsedTime = sequentialRead(range * nRows, nRows);
+        for(int i = 0 ; i < 10; i++) {
+          long elapsedTime = sequentialRead(i * nRows, nRows);
           totalElapsedTime += elapsedTime;
         }
         System.out.print("Sequential read of " + R + " rows completed in: ");
@@ -230,16 +230,16 @@
         throw new IllegalArgumentException("Invalid command value: " + test);
       }
       System.out.println((totalElapsedTime / 1000.0));
-
     } catch(Exception e) {
-      e.printStackTrace();
-      
+      LOG.error("Failed", e);
     } finally {
+      LOG.info("Deleting table " + tableDescriptor.getName());
       this.client.deleteTable(tableDescriptor.getName());
     }
   }
   
-  private void runOneTest(Test cmd) {
+  private void runOneTest(@SuppressWarnings("unused") Test cmd) {
+    // TODO
   }
   
   private void runTest(Test test) throws IOException {
@@ -302,6 +302,10 @@
     System.err.println("                 running: 1 <= value <= 500");
     System.err.println(" range           Integer. Required. 0 <= value <= " +
       "(nclients * 10) - 1");
+    System.err.println("Examples:");
+    System.err.println(" To run a single evaluation client:");
+    System.err.println(" $ bin/hbase " +
+      "org.apache.hadoop.hbase.EvaluationClient sequentialWrite 1 1");
   }
 
   private void getArgs(final int start, final String[] args) {

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java?view=diff&rev=544188&r1=544187&r2=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/MiniHBaseCluster.java Mon Jun  4 10:14:10 2007
@@ -157,25 +157,20 @@
   public void shutdown() {
     LOG.info("Shutting down the HBase Cluster");
     for(int i = 0; i < regionServers.length; i++) {
-      try {
-        regionServers[i].stop();
-        
-      } catch(IOException e) {
-        e.printStackTrace();
-      }
+      regionServers[i].stop();
     }
     master.shutdown();
     for(int i = 0; i < regionServers.length; i++) {
       try {
         regionThreads[i].join();
-        
       } catch(InterruptedException e) {
+        // continue
       }
     }
     try {
       masterThread.join();
-      
     } catch(InterruptedException e) {
+      // continue
     }
     LOG.info("HBase Cluster shutdown complete");
 

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompare.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompare.java?view=auto&rev=544188
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompare.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestCompare.java Mon Jun  4 10:14:10 2007
@@ -0,0 +1,33 @@
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.io.Text;
+
+import junit.framework.TestCase;
+
+/**
+ * Test comparing HBase objects.
+ */
+public class TestCompare extends TestCase {
+  public void testHRegionInfo() {
+    HRegionInfo a = new HRegionInfo(1, new HTableDescriptor("a"), null, null);
+    HRegionInfo b = new HRegionInfo(2, new HTableDescriptor("b"), null, null);
+    assertTrue(a.compareTo(b) != 0);
+    HTableDescriptor t = new HTableDescriptor("t");
+    Text midway = new Text("midway");
+    a = new HRegionInfo(1, t, null, midway);
+    b = new HRegionInfo(2, t, midway, null);
+    assertTrue(a.compareTo(b) < 0);
+    assertTrue(b.compareTo(a) > 0);
+    assertEquals(a, a);
+    assertTrue(a.compareTo(a) == 0);
+    a = new HRegionInfo(1, t, new Text("a"), new Text("d"));
+    b = new HRegionInfo(2, t, new Text("e"), new Text("g"));
+    assertTrue(a.compareTo(b) < 0);
+    a = new HRegionInfo(1, t, new Text("aaaa"), new Text("dddd"));
+    b = new HRegionInfo(2, t, new Text("e"), new Text("g"));
+    assertTrue(a.compareTo(b) < 0);
+    a = new HRegionInfo(1, t, new Text("aaaa"), new Text("dddd"));
+    b = new HRegionInfo(2, t, new Text("aaaa"), new Text("eeee"));
+    assertTrue(a.compareTo(b) < 0);
+  }
+}