Posted to commits@hbase.apache.org by br...@apache.org on 2008/02/15 01:29:08 UTC

svn commit: r627918 [1/3] - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/generated/master/ src/java/org/apache/hadoop/hbase/hql/ src/java/org/apache/hadoop/hbase/...

Author: bryanduxbury
Date: Thu Feb 14 16:29:04 2008
New Revision: 627918

URL: http://svn.apache.org/viewvc?rev=627918&view=rev
Log:
414 Move client classes into client package

Added:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestBatchUpdate.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestHTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestListTables.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/client/TestMultipleUpdates.java
Removed:
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HBaseAdmin.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConnection.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HConnectionManager.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBatchUpdate.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestHTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestListTables.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestMultipleUpdates.java
Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/generated/master/master_jsp.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/AlterCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/CommandFactory.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/CreateCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/DeleteCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/DescCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/DisableCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/DropCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/EnableCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/InsertCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/SelectCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/ShowCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/TruncateCommand.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/hql/formatter/AsciiTableFormatter.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/LuceneDocumentWrapper.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/GenericHandler.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/rest/TableHandler.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
    hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/util/Migrate.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DFSAbort.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseClusterTestCase.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/HBaseTestCase.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/MultiRegionTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/PerformanceEvaluation.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestBloomFilters.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestInfoServers.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestLogRolling.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestRegionServerExit.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestScannerAPI.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestTable.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/TestTimestamp.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/hql/TestHQL.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableIndex.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java
    hadoop/hbase/trunk/src/test/org/apache/hadoop/hbase/master/OOMEHMaster.java
    hadoop/hbase/trunk/src/webapps/master/master.jsp
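
For downstream code, the practical effect of HBASE-414 above is an import change: HBaseAdmin, HConnection, HConnectionManager and HTable now come from org.apache.hadoop.hbase.client instead of org.apache.hadoop.hbase. A minimal sketch of an updated caller follows; the table name is a placeholder and the HTable(conf, tableName) constructor is assumed unchanged by the move (HTable's source is in another part of this commit mail).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;   // was org.apache.hadoop.hbase.HBaseAdmin
import org.apache.hadoop.hbase.client.HTable;       // was org.apache.hadoop.hbase.HTable
import org.apache.hadoop.io.Text;

public class ClientPackageExample {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    HBaseAdmin admin = new HBaseAdmin(conf);
    if (!admin.tableExists(new Text("example"))) {
      admin.createTable(new HTableDescriptor("example"));
    }
    // Assumed: HTable keeps its (conf, table name) constructor after the move.
    HTable table = new HTable(conf, new Text("example"));
    System.out.println("Opened " + table);
  }
}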

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=627918&r1=627917&r2=627918&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Thu Feb 14 16:29:04 2008
@@ -51,7 +51,8 @@
    HBASE-407   Keep HRegionLocation information in LRU structure 
    HBASE-444   hbase is very slow at determining table is not present
    HBASE-438   XMLOutputter state should be initialized.
-
+   HBASE-414   Move client classes into client package
+   
 Branch 0.1
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HAbstractScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HAbstractScanner.java?rev=627918&r1=627917&r2=627918&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HAbstractScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HAbstractScanner.java Thu Feb 14 16:29:04 2008
@@ -277,4 +277,4 @@
     throw new UnsupportedOperationException("Unimplemented serverside. " +
       "next(HStoreKey, StortedMap(...) is more efficient");
   }
-}
\ No newline at end of file
+}

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java?rev=627918&r1=627917&r2=627918&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HMerge.java Thu Feb 14 16:29:04 2008
@@ -34,7 +34,9 @@
 import org.apache.hadoop.io.Text;
 
 import org.apache.hadoop.hbase.io.BatchUpdate;
-
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
 /** 
  * A non-instantiable class that has a static method capable of compacting
  * a table by merging adjacent regions that have grown too small.

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java?rev=627918&r1=627917&r2=627918&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/HRegionServer.java Thu Feb 14 16:29:04 2008
@@ -67,6 +67,7 @@
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.hbase.master.HMasterRegionInterface;
+import org.apache.hadoop.hbase.client.HTable;
 
 /**
  * HRegionServer makes a set of HRegions available to clients.  It checks in with

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java?rev=627918&r1=627917&r2=627918&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/LocalHBaseCluster.java Thu Feb 14 16:29:04 2008
@@ -30,6 +30,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 
 /**
  * This class creates a single process HBase cluster. One thread is created for
@@ -308,4 +309,4 @@
     admin.createTable(new HTableDescriptor(cluster.getClass().getName()));
     cluster.shutdown();
   }
-}
\ No newline at end of file
+}

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java?rev=627918&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java Thu Feb 14 16:29:04 2008
@@ -0,0 +1,560 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.NoSuchElementException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.io.HbaseMapWritable;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.master.HMasterInterface;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.HRegionInterface;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HStoreKey;
+
+/**
+ * Provides administrative functions for HBase
+ */
+public class HBaseAdmin implements HConstants {
+  protected final Log LOG = LogFactory.getLog(this.getClass().getName());
+
+  protected final HConnection connection;
+  protected final long pause;
+  protected final int numRetries;
+  protected volatile HMasterInterface master;
+  
+  /**
+   * Constructor
+   * 
+   * @param conf Configuration object
+   * @throws MasterNotRunningException
+   */
+  public HBaseAdmin(HBaseConfiguration conf) throws MasterNotRunningException {
+    this.connection = HConnectionManager.getConnection(conf);
+    this.pause = conf.getLong("hbase.client.pause", 30 * 1000);
+    this.numRetries = conf.getInt("hbase.client.retries.number", 5);
+    this.master = connection.getMaster();
+  }
+
+  /**
+   * @return proxy connection to master server for this instance
+   * @throws MasterNotRunningException
+   */
+  public HMasterInterface getMaster() throws MasterNotRunningException{
+    return this.connection.getMaster();
+  }
+  
+  /** @return - true if the master server is running */
+  public boolean isMasterRunning() {
+    return this.connection.isMasterRunning();
+  }
+
+  /**
+   * @param tableName Table to check.
+   * @return True if table exists already.
+   * @throws MasterNotRunningException
+   */
+  public boolean tableExists(final Text tableName) throws MasterNotRunningException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    return connection.tableExists(tableName);
+  }
+
+  /**
+   * List all the userspace tables.  In other words, scan the META table.
+   *
+   * If we wanted this to be really fast, we could implement a special
+   * catalog table that just contains table names and their descriptors.
+   * Right now, it only exists as part of the META table's region info.
+   *
+   * @return - returns an array of HTableDescriptors 
+   * @throws IOException
+   */
+  public HTableDescriptor[] listTables() throws IOException {
+    return this.connection.listTables();
+  }
+
+  /**
+   * Creates a new table
+   * 
+   * @param desc table descriptor for table
+   * 
+   * @throws IllegalArgumentException if the table name is reserved
+   * @throws MasterNotRunningException if master is not running
+   * @throws NoServerForRegionException if root region is not being served
+   * @throws TableExistsException if table already exists (with concurrent
+   * callers, the table may have been created between the test for existence
+   * and the attempt at creation).
+   * @throws IOException
+   */
+  public void createTable(HTableDescriptor desc)
+  throws IOException {
+    createTableAsync(desc);
+
+    for (int tries = 0; tries < numRetries; tries++) {
+      try {
+        // Wait for new table to come on-line
+        connection.locateRegion(desc.getName(), EMPTY_START_ROW);
+        break;
+        
+      } catch (TableNotFoundException e) {
+        if (tries == numRetries - 1) {
+          // Ran out of tries
+          throw e;
+        }
+      }
+      try {
+        Thread.sleep(pause);
+      } catch (InterruptedException e) {
+        // continue
+      }
+    }
+  }
+  
+  /**
+   * Creates a new table but does not block and wait for it to come online.
+   * 
+   * @param desc table descriptor for table
+   * 
+   * @throws IllegalArgumentException if the table name is reserved
+   * @throws MasterNotRunningException if master is not running
+   * @throws NoServerForRegionException if root region is not being served
+   * @throws TableExistsException if table already exists (with concurrent
+   * callers, the table may have been created between the test for existence
+   * and the attempt at creation).
+   * @throws IOException
+   */
+  public void createTableAsync(HTableDescriptor desc)
+  throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    checkReservedTableName(desc.getName());
+    try {
+      this.master.createTable(desc);
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+  }
+
+  /**
+   * Deletes a table
+   * 
+   * @param tableName name of table to delete
+   * @throws IOException
+   */
+  public void deleteTable(Text tableName) throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    checkReservedTableName(tableName);
+    HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.deleteTable(tableName);
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+
+    // Wait until first region is deleted
+    HRegionInterface server =
+      connection.getHRegionConnection(firstMetaServer.getServerAddress());
+    HRegionInfo info = new HRegionInfo();
+    for (int tries = 0; tries < numRetries; tries++) {
+      long scannerId = -1L;
+      try {
+        scannerId =
+          server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
+            COL_REGIONINFO_ARRAY, tableName, System.currentTimeMillis(), null);
+        HbaseMapWritable values = server.next(scannerId);
+        if (values == null || values.size() == 0) {
+          break;
+        }
+        boolean found = false;
+        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
+          HStoreKey key = (HStoreKey) e.getKey();
+          if (key.getColumn().equals(COL_REGIONINFO)) {
+            info = (HRegionInfo) Writables.getWritable(
+                  ((ImmutableBytesWritable) e.getValue()).get(), info);
+            
+            if (info.getTableDesc().getName().equals(tableName)) {
+              found = true;
+            }
+          }
+        }
+        if (!found) {
+          break;
+        }
+
+      } catch (IOException ex) {
+        if(tries == numRetries - 1) {           // no more tries left
+          if (ex instanceof RemoteException) {
+            ex = RemoteExceptionHandler.decodeRemoteException((RemoteException) ex);
+          }
+          throw ex;
+        }
+
+      } finally {
+        if (scannerId != -1L) {
+          try {
+            server.close(scannerId);
+          } catch (Exception ex) {
+            LOG.warn(ex);
+          }
+        }
+      }
+
+      try {
+        Thread.sleep(pause);
+      } catch (InterruptedException e) {
+        // continue
+      }
+    }
+    LOG.info("table " + tableName + " deleted");
+  }
+
+  /**
+   * Brings a table on-line (enables it)
+   * 
+   * @param tableName name of the table
+   * @throws IOException
+   */
+  public void enableTable(Text tableName) throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    checkReservedTableName(tableName);
+    HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
+    
+    try {
+      this.master.enableTable(tableName);
+      
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+
+    // Wait until first region is enabled
+    
+    HRegionInterface server =
+      connection.getHRegionConnection(firstMetaServer.getServerAddress());
+
+    HRegionInfo info = new HRegionInfo();
+    for (int tries = 0; tries < numRetries; tries++) {
+      int valuesfound = 0;
+      long scannerId = -1L;
+      try {
+        scannerId =
+          server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
+            COL_REGIONINFO_ARRAY, tableName, System.currentTimeMillis(), null);
+        boolean isenabled = false;
+        
+        while (true) {
+          HbaseMapWritable values = server.next(scannerId);
+          if (values == null || values.size() == 0) {
+            if (valuesfound == 0) {
+              throw new NoSuchElementException(
+                  "table " + tableName + " not found");
+            }
+            break;
+          }
+          valuesfound += 1;
+          for (Map.Entry<Writable, Writable> e: values.entrySet()) {
+            HStoreKey key = (HStoreKey) e.getKey();
+            if (key.getColumn().equals(COL_REGIONINFO)) {
+              info = (HRegionInfo) Writables.getWritable(
+                    ((ImmutableBytesWritable) e.getValue()).get(), info);
+            
+              isenabled = !info.isOffline();
+              break;
+            }
+          }
+          if (isenabled) {
+            break;
+          }
+        }
+        if (isenabled) {
+          break;
+        }
+        
+      } catch (IOException e) {
+        if (tries == numRetries - 1) {                  // no more retries
+          if (e instanceof RemoteException) {
+            e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
+          }
+          throw e;
+        }
+        
+      } finally {
+        if (scannerId != -1L) {
+          try {
+            server.close(scannerId);
+            
+          } catch (Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be enabled from " +
+            tableName);
+      }
+      try {
+        Thread.sleep(pause);
+        
+      } catch (InterruptedException e) {
+        // continue
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be enabled from " +
+            tableName);
+      }
+    }
+    LOG.info("Enabled table " + tableName);
+  }
+
+  /**
+   * Disables a table (takes it off-line). If it is being served, the master
+   * will tell the servers to stop serving it.
+   * 
+   * @param tableName name of table
+   * @throws IOException
+   */
+  public void disableTable(Text tableName) throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    checkReservedTableName(tableName);
+    HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
+
+    try {
+      this.master.disableTable(tableName);
+      
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+
+    // Wait until first region is disabled
+    
+    HRegionInterface server =
+      connection.getHRegionConnection(firstMetaServer.getServerAddress());
+
+    HRegionInfo info = new HRegionInfo();
+    for(int tries = 0; tries < numRetries; tries++) {
+      int valuesfound = 0;
+      long scannerId = -1L;
+      try {
+        scannerId =
+          server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
+            COL_REGIONINFO_ARRAY, tableName, System.currentTimeMillis(), null);
+        
+        boolean disabled = false;
+        while (true) {
+          HbaseMapWritable values = server.next(scannerId);
+          if (values == null || values.size() == 0) {
+            if (valuesfound == 0) {
+              throw new NoSuchElementException("table " + tableName + " not found");
+            }
+            break;
+          }
+          valuesfound += 1;
+          for (Map.Entry<Writable, Writable> e: values.entrySet()) {
+            HStoreKey key = (HStoreKey) e.getKey();
+            if (key.getColumn().equals(COL_REGIONINFO)) {
+              info = (HRegionInfo) Writables.getWritable(
+                    ((ImmutableBytesWritable) e.getValue()).get(), info);
+            
+              disabled = info.isOffline();
+              break;
+            }
+          }
+          if (disabled) {
+            break;
+          }
+        }
+        if (disabled) {
+          break;
+        }
+        
+      } catch (IOException e) {
+        if (tries == numRetries - 1) {                  // no more retries
+          if (e instanceof RemoteException) {
+            e = RemoteExceptionHandler.decodeRemoteException((RemoteException) e);
+          }
+          throw e;
+        }
+        
+      } finally {
+        if (scannerId != -1L) {
+          try {
+            server.close(scannerId);
+            
+          } catch (Exception e) {
+            LOG.warn(e);
+          }
+        }
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Sleep. Waiting for first region to be disabled from " +
+            tableName);
+      }
+      try {
+        Thread.sleep(pause);
+      } catch (InterruptedException e) {
+        // continue
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Wake. Waiting for first region to be disabled from " +
+            tableName);
+      }
+    }
+    LOG.info("Disabled table " + tableName);
+  }
+  
+  /**
+   * Add a column to an existing table
+   * 
+   * @param tableName name of the table to add column to
+   * @param column column descriptor of column to be added
+   * @throws IOException
+   */
+  public void addColumn(Text tableName, HColumnDescriptor column)
+  throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    checkReservedTableName(tableName);
+    try {
+      this.master.addColumn(tableName, column);
+      
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+  }
+
+  /**
+   * Delete a column from a table
+   * 
+   * @param tableName name of table
+   * @param columnName name of column to be deleted
+   * @throws IOException
+   */
+  public void deleteColumn(Text tableName, Text columnName)
+  throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    checkReservedTableName(tableName);
+    try {
+      this.master.deleteColumn(tableName, columnName);
+      
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+  }
+
+  /**
+   * Modify an existing column family on a table
+   * 
+   * @param tableName name of table
+   * @param columnName name of column to be modified
+   * @param descriptor new column descriptor to use
+   * @throws IOException
+   */
+  public void modifyColumn(Text tableName, Text columnName, 
+    HColumnDescriptor descriptor)
+  throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    checkReservedTableName(tableName);
+    try {
+      this.master.modifyColumn(tableName, columnName, descriptor);
+      
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    }
+  }
+
+  
+  /** 
+   * Shuts down the HBase instance 
+   * @throws IOException
+   */
+  public synchronized void shutdown() throws IOException {
+    if (this.master == null) {
+      throw new MasterNotRunningException("master has been shut down");
+    }
+    
+    try {
+      this.master.shutdown();
+    } catch (RemoteException e) {
+      throw RemoteExceptionHandler.decodeRemoteException(e);
+    } finally {
+      this.master = null;
+    }
+  }
+
+  /*
+   * Verifies that the specified table name is not a reserved name
+   * @param tableName - the table name to be checked
+   * @throws IllegalArgumentException - if the table name is reserved
+   */
+  protected void checkReservedTableName(Text tableName) {
+    if (tableName == null || tableName.getLength() <= 0) {
+      throw new IllegalArgumentException("Null or empty table name");
+    }
+    if(tableName.charAt(0) == '-' ||
+        tableName.charAt(0) == '.' ||
+        tableName.find(",") != -1) {
+      throw new IllegalArgumentException(tableName + " is a reserved table name");
+    }
+  }
+  
+  private HRegionLocation getFirstMetaServerForTable(Text tableName)
+  throws IOException {
+    Text tableKey = new Text(tableName.toString() + ",,99999999999999");
+    return connection.locateRegion(META_TABLE_NAME, tableKey);
+  }
+}
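
A short usage sketch for the relocated HBaseAdmin, exercising the methods defined above. The table name, the column-family name, and the single-argument HColumnDescriptor constructor are illustrative assumptions, not part of this diff.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.io.Text;

public class HBaseAdminExample {
  public static void main(String[] args) throws Exception {
    HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());

    // Create a table with a single column family if it is not there yet.
    Text name = new Text("adminexample");
    if (!admin.tableExists(name)) {
      HTableDescriptor desc = new HTableDescriptor(name.toString());
      desc.addFamily(new HColumnDescriptor("info:"));  // assumed constructor form
      admin.createTable(desc);  // blocks until the first region is located
    }

    // List the user tables the cluster knows about.
    for (HTableDescriptor d : admin.listTables()) {
      System.out.println(d.getName());
    }

    // Take the table off-line, then drop it.
    admin.disableTable(name);
    admin.deleteTable(name);
  }
}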

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java?rev=627918&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnection.java Thu Feb 14 16:29:04 2008
@@ -0,0 +1,94 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.SortedMap;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.master.HMasterInterface;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HRegionInterface;
+/**
+ * Cluster connection used by client classes to locate regions and to reach the master.
+ */
+public interface HConnection {
+  /**
+   * @return proxy connection to master server for this instance
+   * @throws MasterNotRunningException
+   */
+  public HMasterInterface getMaster() throws MasterNotRunningException;
+
+  /** @return - true if the master server is running */
+  public boolean isMasterRunning();
+  
+  /**
+   * Checks if <code>tableName</code> exists.
+   * @param tableName Table to check.
+   * @return True if table exists already.
+   */
+  public boolean tableExists(final Text tableName);
+  
+  /**
+   * List all the userspace tables.  In other words, scan the META table.
+   *
+   * If we wanted this to be really fast, we could implement a special
+   * catalog table that just contains table names and their descriptors.
+   * Right now, it only exists as part of the META table's region info.
+   *
+   * @return - returns an array of HTableDescriptors 
+   * @throws IOException
+   */
+  public HTableDescriptor[] listTables() throws IOException;
+  
+  /**
+   * Find the location of the region of <i>tableName</i> that <i>row</i>
+   * lives in.
+   * @param tableName name of the table <i>row</i> is in
+   * @param row row key you're trying to find the region of
+   * @return HRegionLocation that describes where to find the region in 
+   * question
+   */
+  public HRegionLocation locateRegion(Text tableName, Text row)
+  throws IOException;
+  
+  /**
+   * Find the location of the region of <i>tableName</i> that <i>row</i>
+   * lives in, ignoring any value that might be in the cache.
+   * @param tableName name of the table <i>row</i> is in
+   * @param row row key you're trying to find the region of
+   * @return HRegionLocation that describes where to find the region in 
+   * question
+   */
+  public HRegionLocation relocateRegion(Text tableName, Text row)
+  throws IOException;  
+  
+  /** 
+   * Establishes a connection to the region server at the specified address.
+   * @param regionServer - the server to connect to
+   * @return proxy for HRegionServer
+   * @throws IOException
+   */
+  public HRegionInterface getHRegionConnection(HServerAddress regionServer)
+  throws IOException;
+}
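
A sketch of how client code might obtain an HConnection through HConnectionManager (added in the next file) and use the interface above to resolve a row to a region and its server. Table and row names are placeholders.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInterface;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.io.Text;

public class HConnectionExample {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    // Connections are cached per instance, so repeated calls return the same object.
    HConnection connection = HConnectionManager.getConnection(conf);

    // Resolve a row to the region that holds it, then get an RPC proxy to that server.
    HRegionLocation location =
      connection.locateRegion(new Text("example"), new Text("row0001"));
    HRegionInterface server =
      connection.getHRegionConnection(location.getServerAddress());
    server.getRegionInfo(location.getRegionInfo().getRegionName());

    System.out.println("row0001 is in " + location.getRegionInfo().getRegionName() +
      " on " + location.getServerAddress());
  }
}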

Added: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java?rev=627918&view=auto
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java (added)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java Thu Feb 14 16:29:04 2008
@@ -0,0 +1,763 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.ipc.HbaseRPC;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.hbase.io.HbaseMapWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.master.HMasterInterface;
+import org.apache.hadoop.hbase.util.SoftSortedMap;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInterface;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NoServerForRegionException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+
+/**
+ * A non-instantiable class that manages connections to multiple tables in
+ * multiple HBase instances
+ */
+public class HConnectionManager implements HConstants {
+  /*
+   * Private. Not instantiable.
+   */
+  private HConnectionManager() {
+    super();
+  }
+  
+  // A Map of HBase instance name (conf.get(HBASE_DIR)) -> connection
+  // information for that instance.
+  // Note that although the Map is synchronized, the objects it contains
+  // are mutable and hence require synchronized access to them
+  
+  private static final Map<String, TableServers> HBASE_INSTANCES =
+    Collections.synchronizedMap(new HashMap<String, TableServers>());
+
+  /**
+   * Get the connection object for the instance specified by the configuration.
+   * If no current connection exists, create a new connection for that instance.
+   * @param conf
+   * @return HConnection object for the instance specified by the configuration
+   */
+  public static HConnection getConnection(HBaseConfiguration conf) {
+    TableServers connection;
+    synchronized (HBASE_INSTANCES) {
+      String instanceName = conf.get(HBASE_DIR);
+
+      connection = HBASE_INSTANCES.get(instanceName);
+
+      if (connection == null) {
+        connection = new TableServers(conf);
+        HBASE_INSTANCES.put(instanceName, connection);
+      }
+    }
+    return connection;
+  }
+  
+  /**
+   * Delete connection information for the instance specified by the configuration
+   * @param conf
+   */
+  public static void deleteConnection(HBaseConfiguration conf) {
+    synchronized (HBASE_INSTANCES) {
+      HBASE_INSTANCES.remove(conf.get(HBASE_DIR));
+    }
+  }
+  
+  /* Encapsulates finding the servers for an HBase instance */
+  private static class TableServers implements HConnection, HConstants {
+    private static final Log LOG = LogFactory.getLog(TableServers.class);
+    private final Class<? extends HRegionInterface> serverInterfaceClass;
+    private final long pause;
+    private final int numRetries;
+
+    private final Integer masterLock = new Integer(0);
+    private volatile boolean closed;
+    private volatile HMasterInterface master;
+    private volatile boolean masterChecked;
+    
+    private final Integer rootRegionLock = new Integer(0);
+    private final Integer metaRegionLock = new Integer(0);
+    private final Integer userRegionLock = new Integer(0);
+        
+    private volatile HBaseConfiguration conf;
+    
+    // Known region HServerAddress.toString() -> HRegionInterface 
+    private Map<String, HRegionInterface> servers;
+
+    private HRegionLocation rootRegionLocation; 
+    
+    private Map<Text, SoftSortedMap<Text, HRegionLocation>> 
+      cachedRegionLocations = new ConcurrentHashMap<Text, 
+        SoftSortedMap<Text, HRegionLocation>>();
+    
+    /** 
+     * constructor
+     * @param conf Configuration object
+     */
+    @SuppressWarnings("unchecked")
+    public TableServers(HBaseConfiguration conf) {
+      this.conf = LocalHBaseCluster.doLocal(new HBaseConfiguration(conf));
+      
+      String serverClassName =
+        conf.get(REGION_SERVER_CLASS, DEFAULT_REGION_SERVER_CLASS);
+
+      this.closed = false;
+      
+      try {
+        this.serverInterfaceClass =
+          (Class<? extends HRegionInterface>) Class.forName(serverClassName);
+        
+      } catch (ClassNotFoundException e) {
+        throw new UnsupportedOperationException(
+            "Unable to find region server interface " + serverClassName, e);
+      }
+
+      this.pause = conf.getLong("hbase.client.pause", 30 * 1000);
+      this.numRetries = conf.getInt("hbase.client.retries.number", 5);
+      
+      this.master = null;
+      this.masterChecked = false;
+      this.servers = new ConcurrentHashMap<String, HRegionInterface>();
+    }
+    
+    /** {@inheritDoc} */
+    public HMasterInterface getMaster() throws MasterNotRunningException {
+      synchronized (this.masterLock) {
+        for (int tries = 0;
+          !this.closed &&
+          !this.masterChecked && this.master == null &&
+          tries < numRetries;
+        tries++) {
+          
+          HServerAddress masterLocation = new HServerAddress(this.conf.get(
+              MASTER_ADDRESS, DEFAULT_MASTER_ADDRESS));
+
+          try {
+            HMasterInterface tryMaster = (HMasterInterface)HbaseRPC.getProxy(
+                HMasterInterface.class, HMasterInterface.versionID, 
+                masterLocation.getInetSocketAddress(), this.conf);
+            
+            if (tryMaster.isMasterRunning()) {
+              this.master = tryMaster;
+              break;
+            }
+            
+          } catch (IOException e) {
+            if(tries == numRetries - 1) {
+              // This was our last chance - don't bother sleeping
+              break;
+            }
+            LOG.info("Attempt " + tries + " of " + this.numRetries +
+                " failed with <" + e + ">. Retrying after sleep of " + this.pause);
+          }
+
+          // We either cannot connect to master or it is not running. Sleep & retry
+          
+          try {
+            Thread.sleep(this.pause);
+          } catch (InterruptedException e) {
+            // continue
+          }
+        }
+        this.masterChecked = true;
+      }
+      if (this.master == null) {
+        throw new MasterNotRunningException();
+      }
+      return this.master;
+    }
+
+    /** {@inheritDoc} */
+    public boolean isMasterRunning() {
+      if (this.master == null) {
+        try {
+          getMaster();
+          
+        } catch (MasterNotRunningException e) {
+          return false;
+        }
+      }
+      return true;
+    }
+
+    /** {@inheritDoc} */
+    public boolean tableExists(final Text tableName) {
+      if (tableName == null) {
+        throw new IllegalArgumentException("Table name cannot be null");
+      }
+      if (tableName.equals(ROOT_TABLE_NAME) || tableName.equals(META_TABLE_NAME)) {
+        return true;
+      }
+      boolean exists = false;
+      try {
+        HTableDescriptor[] tables = listTables();
+        for (int i = 0; i < tables.length; i++) {
+          if (tables[i].getName().equals(tableName)) {
+            exists = true;
+          }
+        }
+      } catch (IOException e) {
+        LOG.warn("Testing for table existence threw exception", e);
+      }
+      return exists;
+    }
+
+    /** {@inheritDoc} */
+    public HTableDescriptor[] listTables() throws IOException {
+      HashSet<HTableDescriptor> uniqueTables = new HashSet<HTableDescriptor>();
+      long scannerId = -1L;
+      HRegionInterface server = null;
+      
+      Text startRow = EMPTY_START_ROW;
+      HRegionLocation metaLocation = null;
+
+      // scan over the each meta region
+      do {
+        try{
+          // turn the start row into a location
+          metaLocation = locateRegion(META_TABLE_NAME, startRow);
+
+          // connect to the server hosting the .META. region
+          server = getHRegionConnection(metaLocation.getServerAddress());
+
+          // open a scanner over the meta region
+          scannerId = server.openScanner(
+            metaLocation.getRegionInfo().getRegionName(),
+            COLUMN_FAMILY_ARRAY, startRow, LATEST_TIMESTAMP,
+            null);
+          
+          // iterate through the scanner, accumulating unique table names
+          while (true) {
+            HbaseMapWritable values = server.next(scannerId);
+            if (values == null || values.size() == 0) {
+              break;
+            }
+            for (Map.Entry<Writable, Writable> e: values.entrySet()) {
+              HStoreKey key = (HStoreKey) e.getKey();
+              if (key.getColumn().equals(COL_REGIONINFO)) {
+                HRegionInfo info = new HRegionInfo();
+                info = (HRegionInfo) Writables.getWritable(
+                    ((ImmutableBytesWritable) e.getValue()).get(), info);
+
+                // Only examine the rows where the startKey is zero length   
+                if (info.getStartKey().getLength() == 0) {
+                  uniqueTables.add(info.getTableDesc());
+                }
+              }
+            }
+          }
+          
+          server.close(scannerId);
+          scannerId = -1L;
+          
+          // advance the startRow to the end key of the current region
+          startRow = metaLocation.getRegionInfo().getEndKey();          
+        } catch (IOException e) {
+          // Retry once.
+          metaLocation = relocateRegion(META_TABLE_NAME, startRow);
+          continue;
+        }
+        finally {
+          if (scannerId != -1L) {
+            server.close(scannerId);
+          }
+        }
+      } while (startRow.compareTo(LAST_ROW) != 0);
+      
+      return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
+    }
+
+    public HRegionLocation locateRegion(Text tableName, Text row)
+    throws IOException{
+      return locateRegion(tableName, row, true);
+    }
+
+    public HRegionLocation relocateRegion(Text tableName, Text row)
+    throws IOException{
+      return locateRegion(tableName, row, false);
+    }
+
+    private HRegionLocation locateRegion(Text tableName, Text row, 
+      boolean useCache)
+    throws IOException{
+      if (tableName == null || tableName.getLength() == 0) {
+        throw new IllegalArgumentException(
+            "table name cannot be null or zero length");
+      }
+            
+      if (tableName.equals(ROOT_TABLE_NAME)) {
+        synchronized (rootRegionLock) {
+          // This block guards against two threads trying to find the root
+          // region at the same time. One will go do the find while the 
+          // second waits. The second thread will not redo the find.
+          
+          if (!useCache || rootRegionLocation == null) {
+            return locateRootRegion();
+          }
+          return rootRegionLocation;
+        }        
+      } else if (tableName.equals(META_TABLE_NAME)) {
+        synchronized (metaRegionLock) {
+          // This block guards against two threads trying to load the meta 
+          // region at the same time. The first will load the meta region and
+          // the second will use the value that the first one found.
+
+          return locateRegionInMeta(ROOT_TABLE_NAME, tableName, row, useCache);
+        }
+      } else {
+        synchronized(userRegionLock){
+          return locateRegionInMeta(META_TABLE_NAME, tableName, row, useCache);
+        }
+      }
+    }
+
+    /**
+      * Convenience method for turning a MapWritable into the underlying
+      * SortedMap we all know and love.
+      */
+    private SortedMap<Text, byte[]> sortedMapFromMapWritable(
+      HbaseMapWritable writable) {
+      SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+      for (Map.Entry<Writable, Writable> e: writable.entrySet()) {
+        HStoreKey key = (HStoreKey) e.getKey();
+        results.put(key.getColumn(), 
+          ((ImmutableBytesWritable) e.getValue()).get());
+      }
+      
+      return results;
+    }
+
+    /**
+      * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation
+      * info that contains the table and row we're seeking.
+      */
+    private HRegionLocation locateRegionInMeta(Text parentTable,
+      Text tableName, Text row, boolean useCache)
+    throws IOException{
+      HRegionLocation location = null;
+      
+      // if we're supposed to be using the cache, then check it for a possible
+      // hit. otherwise, delete any existing cached location so it won't 
+      // interfere.
+      if (useCache) {
+        location = getCachedLocation(tableName, row);
+        if (location != null) {
+          return location;
+        }
+      } else {
+        deleteCachedLocation(tableName, row);
+      }
+
+      // build the key of the meta region we should be looking for.
+      // the extra 9's on the end are necessary to allow "exact" matches
+      // without knowing the precise region names.
+      Text metaKey = new Text(tableName.toString() + "," 
+        + row.toString() + ",999999999999999");
+
+      int tries = 0;
+      while (true) {
+        tries++;
+        
+        if (tries >= numRetries) {
+          throw new NoServerForRegionException("Unable to find region for " 
+            + row + " after " + numRetries + " tries.");
+        }
+
+        try{
+          // locate the root region
+          HRegionLocation metaLocation = locateRegion(parentTable, metaKey);
+          HRegionInterface server = 
+            getHRegionConnection(metaLocation.getServerAddress());
+
+          // query the root region for the location of the meta region
+          HbaseMapWritable regionInfoRow = server.getClosestRowBefore(
+            metaLocation.getRegionInfo().getRegionName(), 
+            metaKey, HConstants.LATEST_TIMESTAMP);
+
+          if (regionInfoRow == null) {
+            throw new TableNotFoundException("Table '" + tableName + 
+              "' does not exist.");
+          }
+
+          // convert the MapWritable into a Map we can use
+          SortedMap<Text, byte[]> results = 
+            sortedMapFromMapWritable(regionInfoRow);
+
+          byte[] bytes = results.get(COL_REGIONINFO);
+
+          if (bytes == null || bytes.length == 0) {
+            throw new IOException("HRegionInfo was null or empty in " + 
+              parentTable);
+          }
+
+          // convert the row result into the HRegionLocation we need!
+          HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
+              results.get(COL_REGIONINFO), new HRegionInfo());
+
+          // possible we got a region of a different table...
+          if (!regionInfo.getTableDesc().getName().equals(tableName)) {
+            throw new TableNotFoundException(
+              "Table '" + tableName + "' was not found.");
+          }
+
+          if (regionInfo.isOffline()) {
+            throw new IllegalStateException("region offline: " + 
+              regionInfo.getRegionName());
+          }
+
+          String serverAddress = 
+            Writables.bytesToString(results.get(COL_SERVER));
+        
+          if (serverAddress.equals("")) { 
+            throw new NoServerForRegionException(
+              "No server address listed in " + parentTable + " for region "
+              + regionInfo.getRegionName());
+          }
+        
+          // instantiate the location
+          location = new HRegionLocation(regionInfo, 
+            new HServerAddress(serverAddress));
+      
+          cacheLocation(tableName, location);
+
+          return location;
+        } catch (IllegalStateException e) {
+          if (tries < numRetries - 1) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("reloading table servers because: " + e.getMessage());
+            }
+            relocateRegion(parentTable, metaKey);
+          } else {
+            throw e;
+          }
+        } catch (TableNotFoundException e) {
+          // if we got this error, probably means the table just plain doesn't
+          // exist. rethrow the error immediately. this should always be coming
+          // from the HTable constructor.
+          throw e;
+        } catch (IOException e) {
+          if (e instanceof RemoteException) {
+            e = RemoteExceptionHandler.decodeRemoteException(
+                (RemoteException) e);
+          }
+          if (tries < numRetries - 1) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("reloading table servers because: " + e.getMessage());
+            }
+            relocateRegion(parentTable, metaKey);
+          } else {
+            throw e;
+          }
+        }
+      
+        try{
+          Thread.sleep(pause);              
+        } catch (InterruptedException e){
+          // continue
+        }
+      }
+    }
+
+    /** 
+      * Search the cache for a location that fits our table and row key.
+      * Return null if no suitable region is located. TODO: synchronization note
+      */
+    private HRegionLocation getCachedLocation(Text tableName, Text row) {
+      // find the map of cached locations for this table
+      SoftSortedMap<Text, HRegionLocation> tableLocations = 
+        cachedRegionLocations.get(tableName);
+
+      // if tableLocations for this table isn't built yet, make one
+      if (tableLocations == null) {
+        tableLocations = new SoftSortedMap<Text, HRegionLocation>();
+        cachedRegionLocations.put(tableName, tableLocations);
+      }
+
+      // start to examine the cache. we can only do cache actions
+      // if there's something in the cache for this table.
+      if (!tableLocations.isEmpty()) {
+        if (tableLocations.containsKey(row)) {
+          HRegionLocation rl = tableLocations.get(row);
+          if (rl != null && LOG.isDebugEnabled()) {
+            LOG.debug("Cache hit in table locations for row <" +
+              row + "> and tableName " + tableName +
+              ": location server " + rl.getServerAddress() +
+              ", location region name " + rl.getRegionInfo().getRegionName());
+          }
+          return rl;
+        }
+        
+        // cut the cache so that we only get the part that could contain
+        // regions that match our key
+        SoftSortedMap<Text, HRegionLocation> matchingRegions =
+          tableLocations.headMap(row);
+
+        // if that portion of the map is empty, then we're done. otherwise,
+        // we need to examine the cached location to verify that it is 
+        // a match by end key as well.
+        if (!matchingRegions.isEmpty()) {
+          HRegionLocation possibleRegion = 
+            matchingRegions.get(matchingRegions.lastKey());
+                  
+          Text endKey = possibleRegion.getRegionInfo().getEndKey();
+          
+          // make sure that the end key is greater than the row we're looking 
+          // for, otherwise the row actually belongs in the next region, not 
+          // this one. the exception case is when the endkey is EMPTY_START_ROW,
+          // signifying that the region we're checking is actually the last 
+          // region in the table.
+          if (endKey.equals(EMPTY_TEXT) || endKey.compareTo(row) > 0) {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Found possible location for " + row + ", " +
+                possibleRegion);
+            }
+            return possibleRegion;
+          }
+        }
+      }
+      
+      // passed all the way through, so we got nothin - complete cache miss
+      return null;
+    }
+
+    /**
+      * Delete a cached location, if it satisfies the table name and row
+      * requirements.
+      */
+    private void deleteCachedLocation(Text tableName, Text row){
+      // find the map of cached locations for this table
+      SoftSortedMap<Text, HRegionLocation> tableLocations = 
+        cachedRegionLocations.get(tableName);
+
+      // if tableLocations for this table isn't built yet, make one
+      if (tableLocations == null) {
+        tableLocations = new SoftSortedMap<Text, HRegionLocation>();
+        cachedRegionLocations.put(tableName, tableLocations);
+      }
+
+      // start to examine the cache. we can only do cache actions
+      // if there's something in the cache for this table.
+      if (!tableLocations.isEmpty()) {
+        // cut the cache so that we only get the part that could contain
+        // regions that match our key
+        SoftSortedMap<Text, HRegionLocation> matchingRegions =
+          tableLocations.headMap(row);
+
+        // if that portion of the map is empty, then we're done. otherwise,
+        // we need to examine the cached location to verify that it is 
+        // a match by end key as well.
+        if (!matchingRegions.isEmpty()) {
+          HRegionLocation possibleRegion = 
+            matchingRegions.get(matchingRegions.lastKey());
+          
+          Text endKey = possibleRegion.getRegionInfo().getEndKey();
+          
+          // by nature of the map, we know that the start key has to be < 
+          // otherwise it wouldn't be in the headMap. 
+          if (endKey.compareTo(row) <= 0) {
+            // delete any matching entry
+            HRegionLocation rl = 
+              tableLocations.remove(matchingRegions.lastKey());
+            if (rl != null && LOG.isDebugEnabled()) {
+              LOG.debug("Removed " + rl.getRegionInfo().getRegionName() +
+                " from cache because of " + row);
+            }
+          }
+        }
+      }
+    }
+
+    /**
+      * Put a newly discovered HRegionLocation into the cache.
+      */
+    private void cacheLocation(Text tableName, HRegionLocation location){
+      Text startKey = location.getRegionInfo().getStartKey();
+      
+      // find the map of cached locations for this table
+      SoftSortedMap<Text, HRegionLocation> tableLocations = 
+        cachedRegionLocations.get(tableName);
+
+      // if tableLocations for this table isn't built yet, make one
+      if (tableLocations == null) {
+        tableLocations = new SoftSortedMap<Text, HRegionLocation>();
+        cachedRegionLocations.put(tableName, tableLocations);
+      }
+      
+      // save the HRegionLocation under the startKey
+      tableLocations.put(startKey, location);
+    }
+    
+    /** {@inheritDoc} */
+    public HRegionInterface getHRegionConnection(
+      HServerAddress regionServer) 
+    throws IOException {
+
+      HRegionInterface server;
+      synchronized (this.servers) {
+        // See if we already have a connection
+        server = this.servers.get(regionServer.toString());
+
+        if (server == null) { // Get a connection
+          long versionId = 0;
+          try {
+            versionId =
+              serverInterfaceClass.getDeclaredField("versionID").getLong(server);
+          } catch (IllegalAccessException e) {
+            // Should never happen unless visibility of versionID changes
+            throw new UnsupportedOperationException(
+                "Unable to open a connection to a " +
+                serverInterfaceClass.getName() + " server.", e);
+          } catch (NoSuchFieldException e) {
+            // Should never happen unless versionID field name changes in HRegionInterface
+            throw new UnsupportedOperationException(
+                "Unable to open a connection to a " +
+                serverInterfaceClass.getName() + " server.", e);
+          }
+
+          try {
+            server = (HRegionInterface)HbaseRPC.waitForProxy(serverInterfaceClass,
+                versionId, regionServer.getInetSocketAddress(), this.conf);
+          } catch (RemoteException e) {
+            throw RemoteExceptionHandler.decodeRemoteException(e);
+          }
+          this.servers.put(regionServer.toString(), server);
+        }
+      }
+      return server;
+    }
+
+    /*
+     * Repeatedly try to find the root region by asking the master for where it is
+     * @return HRegionLocation for root region if found
+     * @throws NoServerForRegionException - if the root region can not be located
+     * after retrying
+     * @throws IOException 
+     */
+    private HRegionLocation locateRootRegion()
+    throws IOException {
+    
+      getMaster();
+      
+      HServerAddress rootRegionAddress = null;
+      
+      for (int tries = 0; tries < numRetries; tries++) {
+        int localTimeouts = 0;
+        
+        // ask the master which server has the root region
+        while (rootRegionAddress == null && localTimeouts < numRetries) {
+          rootRegionAddress = master.findRootRegion();
+          if (rootRegionAddress == null) {
+            try {
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Sleeping. Waiting for root region.");
+              }
+              Thread.sleep(pause);
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Wake. Retry finding root region.");
+              }
+            } catch (InterruptedException iex) {
+              // continue
+            }
+            localTimeouts++;
+          }
+        }
+        
+        if (rootRegionAddress == null) {
+          throw new NoServerForRegionException(
+              "Timed out trying to locate root region");
+        }
+        
+        // get a connection to the region server
+        HRegionInterface server = getHRegionConnection(rootRegionAddress);
+
+        try {
+          // if this works, then we're good, and we have an acceptable address,
+          // so we can stop doing retries and return the result.
+          server.getRegionInfo(HRegionInfo.rootRegionInfo.getRegionName());
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Found ROOT " + HRegionInfo.rootRegionInfo);
+          }
+          break;
+        } catch (IOException e) {
+          if (tries == numRetries - 1) {
+            // Don't bother sleeping. We've run out of retries.
+            if (e instanceof RemoteException) {
+              e = RemoteExceptionHandler.decodeRemoteException(
+                  (RemoteException) e);
+            }
+            throw e;
+          }
+          
+          // Sleep and retry finding root region.
+          try {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Root region location changed. Sleeping.");
+            }
+            Thread.sleep(pause);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Wake. Retry finding root region.");
+            }
+          } catch (InterruptedException iex) {
+            // continue
+          }
+        }
+        
+        rootRegionAddress = null;
+      }
+      
+      // if the address is null by this point, then the retries have failed,
+      // and we're sort of sunk
+      if (rootRegionAddress == null) {
+        throw new NoServerForRegionException(
+          "unable to locate root region server");
+      }
+      
+      // return the region location
+      return new HRegionLocation(
+        HRegionInfo.rootRegionInfo, rootRegionAddress);
+    }
+  }
+}
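
The region cache inside TableServers is keyed by region start key: getCachedLocation takes the headMap of the cache up to the requested row, looks at the last entry in that view, and accepts it only if its end key is past the row (or empty, meaning the last region in the table). A standalone sketch of that lookup rule with a plain TreeMap, leaving out the SoftSortedMap and HBase types:

import java.util.SortedMap;
import java.util.TreeMap;

public class StartKeyCacheSketch {
  // One cached "region" per entry: its start key (the map key) and its end key (the value).
  static final TreeMap<String, String> cache = new TreeMap<String, String>();

  /** Return the start key of the cached region covering row, or null on a miss. */
  static String lookup(String row) {
    // Exact hit: a region starts at this row.
    if (cache.containsKey(row)) {
      return row;
    }
    // Otherwise the candidate is the region with the greatest start key below the row.
    SortedMap<String, String> head = cache.headMap(row);
    if (head.isEmpty()) {
      return null;
    }
    String startKey = head.lastKey();
    String endKey = cache.get(startKey);
    // The candidate only matches if its end key is past the row,
    // or empty, which marks the last region in the table.
    return (endKey.length() == 0 || endKey.compareTo(row) > 0) ? startKey : null;
  }

  public static void main(String[] args) {
    cache.put("", "m");   // region ["", "m")
    cache.put("m", "");   // region ["m", end of table)
    System.out.println(lookup("apple"));  // "" -> first region
    System.out.println(lookup("zebra"));  // "m" -> last region
  }
}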