Posted to commits@hbase.apache.org by st...@apache.org on 2008/05/16 00:10:50 UTC

svn commit: r656868 [4/10] - in /hadoop/hbase/trunk: ./ src/java/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/hbase/client/ src/java/org/apache/hadoop/hbase/filter/ src/java/org/apache/hadoop/hbase/hql/ src/java/org/apache/hadoop/hbase/io/ src/j...

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableSplit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableSplit.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableSplit.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/mapred/TableSplit.java Thu May 15 15:10:47 2008
@@ -23,22 +23,22 @@
 import java.io.DataOutput;
 import java.io.IOException;
 
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.mapred.InputSplit;
 
 /**
  * A table split corresponds to a key range [low, high)
  */
 public class TableSplit implements InputSplit {
-  private Text m_tableName;
-  private Text m_startRow;
-  private Text m_endRow;
+  private byte [] m_tableName;
+  private byte [] m_startRow;
+  private byte [] m_endRow;
 
   /** default constructor */
   public TableSplit() {
-    m_tableName = new Text();
-    m_startRow = new Text();
-    m_endRow = new Text();
+    this(HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
+      HConstants.EMPTY_BYTE_ARRAY);
   }
 
   /**
@@ -47,25 +47,24 @@
    * @param startRow
    * @param endRow
    */
-  public TableSplit(Text tableName, Text startRow, Text endRow) {
-    this();
-    m_tableName.set(tableName);
-    m_startRow.set(startRow);
-    m_endRow.set(endRow);
+  public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow) {
+    m_tableName = tableName;
+    m_startRow = startRow;
+    m_endRow = endRow;
   }
 
   /** @return table name */
-  public Text getTableName() {
+  public byte [] getTableName() {
     return m_tableName;
   }
 
   /** @return starting row key */
-  public Text getStartRow() {
+  public byte [] getStartRow() {
     return m_startRow;
   }
 
   /** @return end row key */
-  public Text getEndRow() {
+  public byte [] getEndRow() {
     return m_endRow;
   }
 
@@ -83,21 +82,22 @@
 
   /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
-    m_tableName.readFields(in);
-    m_startRow.readFields(in);
-    m_endRow.readFields(in);
+    this.m_tableName = Bytes.readByteArray(in);
+    this.m_startRow = Bytes.readByteArray(in);
+    this.m_endRow = Bytes.readByteArray(in);
   }
 
   /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {
-    m_tableName.write(out);
-    m_startRow.write(out);
-    m_endRow.write(out);
+    Bytes.writeByteArray(out, this.m_tableName);
+    Bytes.writeByteArray(out, this.m_startRow);
+    Bytes.writeByteArray(out, this.m_endRow);
   }
 
   /** {@inheritDoc} */
   @Override
   public String toString() {
-    return m_tableName +"," + m_startRow + "," + m_endRow;
+    return Bytes.toString(m_tableName) +"," + Bytes.toString(m_startRow) +
+      "," + Bytes.toString(m_endRow);
   }
-}
+}
\ No newline at end of file

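For readers following the Text-to-byte [] migration, here is a minimal, self-contained sketch of how the reworked TableSplit round-trips through its Writable methods. The class name, table name and row keys are invented for illustration; only the TableSplit and Bytes calls come from the diff above.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.mapred.TableSplit;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableSplitRoundTrip {
      public static void main(String[] args) throws IOException {
        TableSplit split = new TableSplit(Bytes.toBytes("webtable"),
            Bytes.toBytes("row-aaa"), Bytes.toBytes("row-mmm"));
        // write() emits each array length-prefixed via Bytes.writeByteArray
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        split.write(new DataOutputStream(bos));
        // the no-arg constructor now starts from empty byte arrays, not Texts
        TableSplit copy = new TableSplit();
        copy.readFields(new DataInputStream(
            new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(copy);  // webtable,row-aaa,row-mmm
      }
    }
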
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/AddColumn.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/AddColumn.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/AddColumn.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/AddColumn.java Thu May 15 15:10:47 2008
@@ -24,13 +24,12 @@
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.io.Text;
 
 /** Instantiated to add a column family to a table */
 class AddColumn extends ColumnOperation {
   private final HColumnDescriptor newColumn;
 
-  AddColumn(final HMaster master, final Text tableName, 
+  AddColumn(final HMaster master, final byte [] tableName, 
     final HColumnDescriptor newColumn) 
   throws IOException {
     super(master, tableName);
@@ -48,4 +47,4 @@
       updateRegionInfo(server, m.getRegionName(), i);
     }
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/BaseScanner.java Thu May 15 15:10:47 2008
@@ -35,8 +35,8 @@
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownScannerException;
@@ -102,7 +102,7 @@
 abstract class BaseScanner extends Chore implements HConstants {
   static final Log LOG = LogFactory.getLog(BaseScanner.class.getName());
     
-  protected final boolean rootRegion;
+  private final boolean rootRegion;
   protected final HMaster master;
   protected final RegionManager regionManager;
   
@@ -153,26 +153,22 @@
     // scan we go check if parents can be removed.
     Map<HRegionInfo, RowResult> splitParents =
       new HashMap<HRegionInfo, RowResult>();
-    List<Text> emptyRows = new ArrayList<Text>();
+    List<byte []> emptyRows = new ArrayList<byte []>();
     try {
       regionServer = master.connection.getHRegionConnection(region.getServer());
-      scannerId =
-        regionServer.openScanner(region.getRegionName(), COLUMN_FAMILY_ARRAY,
-            EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
-
+      scannerId = regionServer.openScanner(region.getRegionName(),
+        COLUMN_FAMILY_ARRAY, EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
       int numberOfRegionsFound = 0;
       while (true) {
         RowResult values = regionServer.next(scannerId);
         if (values == null || values.size() == 0) {
           break;
         }
-
         HRegionInfo info = master.getHRegionInfo(values.getRow(), values);
         if (info == null) {
           emptyRows.add(values.getRow());
           continue;
         }
-
         String serverName = Writables.cellToString(values.get(COL_SERVER));
         long startCode = Writables.cellToLong(values.get(COL_STARTCODE));
         if (LOG.isDebugEnabled()) {
@@ -216,11 +212,9 @@
     // Scan is finished.
     
     // First clean up any meta region rows which had null HRegionInfos
-    
     if (emptyRows.size() > 0) {
-      LOG.warn("Found " + emptyRows.size() +
-          " rows with empty HRegionInfo while scanning meta region " +
-          region.getRegionName());
+      LOG.warn("Found " + emptyRows.size() + " rows with empty HRegionInfo " +
+        "while scanning meta region " + Bytes.toString(region.getRegionName()));
       master.deleteEmptyMetaRows(regionServer, region.getRegionName(),
           emptyRows);
     }
@@ -262,7 +256,7 @@
    * the filesystem.
    * @throws IOException
    */
-  private boolean cleanupSplits(final Text metaRegionName, 
+  private boolean cleanupSplits(final byte [] metaRegionName, 
     final HRegionInterface srvr, final HRegionInfo parent,
     RowResult rowContent)
   throws IOException {
@@ -302,9 +296,9 @@
    * @return True if still has references to parent.
    * @throws IOException
    */
-  private boolean hasReferences(final Text metaRegionName, 
-    final HRegionInterface srvr, final Text parent,
-    RowResult rowContent, final Text splitColumn)
+  private boolean hasReferences(final byte [] metaRegionName, 
+    final HRegionInterface srvr, final byte [] parent,
+    RowResult rowContent, final byte [] splitColumn)
   throws IOException {
     boolean result = false;
     HRegionInfo split =
@@ -314,9 +308,9 @@
     }
     Path tabledir =
       HTableDescriptor.getTableDir(master.rootdir, split.getTableDesc().getName());
-    for (HColumnDescriptor family: split.getTableDesc().families().values()) {
+    for (HColumnDescriptor family: split.getTableDesc().getFamilies()) {
       Path p = HStoreFile.getMapDir(tabledir, split.getEncodedName(),
-          family.getFamilyName());
+        family.getName());
 
       // Look for reference files.  Call listStatus with an anonymous
       // instance of PathFilter.
@@ -390,8 +384,9 @@
         && (storedInfo == null || storedInfo.getStartCode() != startCode)) {
       // The current assignment is invalid
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Current assignment of " + info.getRegionName() +
-          " is not valid: storedInfo: " + storedInfo + ", startCode: " +
+        LOG.debug("Current assignment of " +
+          Bytes.toString(info.getRegionName()) +
+          " is not valid: serverInfo: " + storedInfo + ", passed startCode: " +
           startCode + ", storedInfo.startCode: " +
           ((storedInfo != null)? storedInfo.getStartCode(): -1) +
           ", unassignedRegions: " + 

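Condensed to a hedged sketch, the rewritten scan loop above does three things: open a scanner on a meta region, walk RowResults until the scanner is exhausted, and remember any row whose info:regioninfo cell is missing so the caller can purge it afterwards. The class and method names here are invented; the HRegionInterface and HConstants usage follows the diff.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.ipc.HRegionInterface;

    public class MetaRegionSweep {
      /** Collects row keys whose regioninfo cell is empty (sketch only). */
      static List<byte[]> findEmptyInfoRows(HRegionInterface srvr,
          byte[] metaRegionName) throws IOException {
        List<byte[]> emptyRows = new ArrayList<byte[]>();
        long scannerId = srvr.openScanner(metaRegionName,
            HConstants.COLUMN_FAMILY_ARRAY, HConstants.EMPTY_START_ROW,
            HConstants.LATEST_TIMESTAMP, null);
        try {
          while (true) {
            RowResult values = srvr.next(scannerId);
            if (values == null || values.size() == 0) {
              break;  // scanner exhausted
            }
            if (values.get(HConstants.COL_REGIONINFO) == null) {
              emptyRows.add(values.getRow());  // schedule row for cleanup
            }
          }
        } finally {
          srvr.close(scannerId);  // always release the scanner
        }
        return emptyRows;
      }
    }
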
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ChangeTableState.java Thu May 15 15:10:47 2008
@@ -23,12 +23,13 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
 
 /** Instantiated to enable or disable a table */
 class ChangeTableState extends TableOperation {
@@ -39,7 +40,7 @@
   
   protected long lockid;
 
-  ChangeTableState(final HMaster master, final Text tableName, 
+  ChangeTableState(final HMaster master, final byte [] tableName, 
     final boolean onLine) 
   throws IOException {
     super(master, tableName);
@@ -118,9 +119,8 @@
 
       // Cause regions being served to be taken off-line and disabled
 
-      HashMap<Text, HRegionInfo> localKillList =
-        new HashMap<Text, HRegionInfo>();
-
+      Map<byte [], HRegionInfo> localKillList =
+        new TreeMap<byte [], HRegionInfo>(Bytes.BYTES_COMPARATOR);
       for (HRegionInfo i: e.getValue()) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("adding region " + i.getRegionName() + " to kill list");
@@ -130,7 +130,7 @@
         // this marks the regions to be offlined once they are closed
         master.regionManager.markRegionForOffline(i.getRegionName());
       }
-      Map<Text, HRegionInfo> killedRegions = 
+      Map<byte [], HRegionInfo> killedRegions = 
         master.regionManager.removeMarkedToClose(serverName);
       if (killedRegions != null) {
         localKillList.putAll(killedRegions);

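The substitution of TreeMap<byte [], ...>(Bytes.BYTES_COMPARATOR) for HashMap<Text, ...> is the idiom to note throughout this commit: Java arrays use identity equals()/hashCode(), so byte [] keys silently miss in a HashMap. A self-contained illustration (the key and value strings are invented):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ByteArrayKeys {
      public static void main(String[] args) {
        Map<byte[], String> hash = new HashMap<byte[], String>();
        hash.put(Bytes.toBytes("region-1"), "kill");
        // distinct array instance with equal contents: identity lookup misses
        System.out.println(hash.get(Bytes.toBytes("region-1")));  // null

        Map<byte[], String> tree =
            new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR);
        tree.put(Bytes.toBytes("region-1"), "kill");
        // comparator-based lookup compares contents lexicographically
        System.out.println(tree.get(Bytes.toBytes("region-1")));  // kill
      }
    }
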
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ColumnOperation.java Thu May 15 15:10:47 2008
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
@@ -29,7 +28,7 @@
 
 abstract class ColumnOperation extends TableOperation {
   
-  protected ColumnOperation(final HMaster master, final Text tableName) 
+  protected ColumnOperation(final HMaster master, final byte [] tableName) 
   throws IOException {
     super(master, tableName);
   }
@@ -44,7 +43,7 @@
     }
   }
 
-  protected void updateRegionInfo(HRegionInterface server, Text regionName,
+  protected void updateRegionInfo(HRegionInterface server, byte [] regionName,
     HRegionInfo i) 
   throws IOException {
     BatchUpdate b = new BatchUpdate(i.getRegionName());

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/DeleteColumn.java Thu May 15 15:10:47 2008
@@ -21,7 +21,6 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
@@ -29,10 +28,10 @@
 
 /** Instantiated to remove a column family from a table */
 class DeleteColumn extends ColumnOperation {
-  private final Text columnName;
+  private final byte [] columnName;
 
-  DeleteColumn(final HMaster master, final Text tableName, 
-    final Text columnName) 
+  DeleteColumn(final HMaster master, final byte [] tableName, 
+    final byte [] columnName) 
   throws IOException {
     super(master, tableName);
     this.columnName = columnName;
@@ -43,16 +42,14 @@
   throws IOException {
     Path tabledir = new Path(this.master.rootdir, tableName.toString());
     for (HRegionInfo i: unservedRegions) {
-      i.getTableDesc().families().remove(columnName);
+      i.getTableDesc().removeFamily(columnName);
       updateRegionInfo(server, m.getRegionName(), i);
-
       // Delete the directories used by the column
-
-      String encodedName = i.getEncodedName();
+      int encodedName = i.getEncodedName();
       this.master.fs.delete(
         HStoreFile.getMapDir(tabledir, encodedName, columnName));
       this.master.fs.delete(
         HStoreFile.getInfoDir(tabledir, encodedName, columnName));
     }
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/HMaster.java Thu May 15 15:10:47 2008
@@ -36,41 +36,39 @@
 import org.apache.hadoop.dfs.FSConstants;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.hbase.ipc.HbaseRPC;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.InfoServer;
-import org.apache.hadoop.hbase.util.Sleeper;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.hbase.io.HbaseMapWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.ipc.Server;
-
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HStoreKey;
-import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HServerLoad;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.HMsg;
+import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.MasterNotRunningException;
-
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
 import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.ipc.HbaseRPC;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.InfoServer;
+import org.apache.hadoop.hbase.util.Sleeper;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.MapWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.Server;
 
 /**
  * HMaster is the "master server" for HBase.
@@ -193,12 +191,12 @@
     this.conf.set(HConstants.HBASE_DIR, this.rootdir.toString());
     this.rand = new Random();
     Path rootRegionDir =
-      HRegion.getRegionDir(rootdir, HRegionInfo.rootRegionInfo);
+      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
     LOG.info("Root region dir: " + rootRegionDir.toString());
 
     try {
       // Make sure the root directory exists!
-      if(! fs.exists(rootdir)) {
+      if (!fs.exists(rootdir)) {
         fs.mkdirs(rootdir); 
         FSUtils.setVersion(fs, rootdir);
       } else {
@@ -206,11 +204,11 @@
       }
 
       if (!fs.exists(rootRegionDir)) {
-        LOG.info("bootstrap: creating ROOT and first META regions");
+        LOG.info("BOOTSTRAP: creating ROOT and first META regions");
         try {
-          HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
-              this.rootdir, this.conf);
-          HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
+          HRegion root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO,
+            this.rootdir, this.conf);
+          HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
             this.rootdir, this.conf);
 
           // Add first region from the META table to the ROOT region.
@@ -328,7 +326,7 @@
   /**
    * @return Read-only map of online regions.
    */
-  public Map<Text, MetaRegion> getOnlineMetaRegions() {
+  public Map<byte [], MetaRegion> getOnlineMetaRegions() {
     return regionManager.getOnlineMetaRegions();
   }
 
@@ -501,9 +499,6 @@
     if (LOG.isDebugEnabled()) {
       LOG.debug("Started service threads");
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Started service threads");
-    }
   }
 
   /*
@@ -526,11 +521,10 @@
 
   /** {@inheritDoc} */
   @SuppressWarnings("unused")
-  public HbaseMapWritable regionServerStartup(HServerInfo serverInfo)
+  public MapWritable regionServerStartup(HServerInfo serverInfo)
   throws IOException {
     // register with server manager
     serverManager.regionServerStartup(serverInfo);
-    
     // send back some config info
     return createConfigurationSubset();
   }
@@ -539,12 +533,12 @@
    * @return Subset of configuration to pass initializing regionservers: e.g.
    * the filesystem to use and root directory to use.
    */
-  protected HbaseMapWritable createConfigurationSubset() {
-    HbaseMapWritable mw = addConfig(new HbaseMapWritable(), HConstants.HBASE_DIR);
+  protected MapWritable createConfigurationSubset() {
+    MapWritable mw = addConfig(new MapWritable(), HConstants.HBASE_DIR);
     return addConfig(mw, "fs.default.name");
   }
 
-  private HbaseMapWritable addConfig(final HbaseMapWritable mw, final String key) {
+  private MapWritable addConfig(final MapWritable mw, final String key) {
     mw.put(new Text(key), new Text(this.conf.get(key)));
     return mw;
   }
@@ -588,7 +582,7 @@
           break;
         }
         createTable(newRegion);
-        LOG.info("created table " + desc.getName());
+        LOG.info("created table " + desc.getNameAsString());
         break;
       } catch (TableExistsException e) {
         throw e;
@@ -603,14 +597,14 @@
 
   private synchronized void createTable(final HRegionInfo newRegion) 
   throws IOException {
-    Text tableName = newRegion.getTableDesc().getName();
+    byte [] tableName = newRegion.getTableDesc().getName();
     // 1. Check to see if table already exists. Get meta region where
     // table would sit should it exist. Open scanner on it. If a region
     // for the table we want to create already exists, then table already
     // created. Throw already-exists exception.
     MetaRegion m = regionManager.getFirstMetaRegionForRegion(newRegion);
         
-    Text metaRegionName = m.getRegionName();
+    byte [] metaRegionName = m.getRegionName();
     HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
     long scannerid = srvr.openScanner(metaRegionName, COL_REGIONINFO_ARRAY,
       tableName, LATEST_TIMESTAMP, null);
@@ -621,51 +615,50 @@
       // does not exist, scanner will return row after where our table would
       // be inserted if it exists so look for exact match on table name.            
       if (data != null && data.size() > 0) {
-        if (HRegionInfo.getTableNameFromRegionName(
-          data.getRow()).equals(tableName)) {
+        byte [] tn = HRegionInfo.getTableNameFromRegionName(data.getRow());
+        if (Bytes.equals(tn, tableName)) {
           // Then a region for this table already exists. Ergo table exists.
-          throw new TableExistsException(tableName.toString());
+          throw new TableExistsException(Bytes.toString(tableName));
         }
       }
     } finally {
       srvr.close(scannerid);
     }
-
     regionManager.createRegion(newRegion, srvr, metaRegionName);
   }
 
   /** {@inheritDoc} */
-  public void deleteTable(Text tableName) throws IOException {
+  public void deleteTable(final byte [] tableName) throws IOException {
     new TableDelete(this, tableName).process();
-    LOG.info("deleted table: " + tableName);
+    LOG.info("deleted table: " + Bytes.toString(tableName));
   }
 
   /** {@inheritDoc} */
-  public void addColumn(Text tableName, HColumnDescriptor column)
+  public void addColumn(byte [] tableName, HColumnDescriptor column)
   throws IOException {    
     new AddColumn(this, tableName, column).process();
   }
 
   /** {@inheritDoc} */
-  public void modifyColumn(Text tableName, Text columnName, 
+  public void modifyColumn(byte [] tableName, byte [] columnName, 
     HColumnDescriptor descriptor)
   throws IOException {
     new ModifyColumn(this, tableName, columnName, descriptor).process();
   }
 
   /** {@inheritDoc} */
-  public void deleteColumn(Text tableName, Text columnName) throws IOException {
-    new DeleteColumn(this, tableName, 
-      HStoreKey.extractFamily(columnName)).process();
+  public void deleteColumn(final byte [] tableName, final byte [] c)
+  throws IOException {
+    new DeleteColumn(this, tableName, HStoreKey.getFamily(c)).process();
   }
 
   /** {@inheritDoc} */
-  public void enableTable(Text tableName) throws IOException {
+  public void enableTable(final byte [] tableName) throws IOException {
     new ChangeTableState(this, tableName, true).process();
   }
 
   /** {@inheritDoc} */
-  public void disableTable(Text tableName) throws IOException {
+  public void disableTable(final byte [] tableName) throws IOException {
     new ChangeTableState(this, tableName, false).process();
   }
 
@@ -694,33 +687,41 @@
    * @return Null or found HRegionInfo.
    * @throws IOException
    */
-  HRegionInfo getHRegionInfo(final Text row, final Map<Text, Cell> map)
+  HRegionInfo getHRegionInfo(final byte [] row, final Map<byte [], Cell> map)
   throws IOException {
     Cell regioninfo = map.get(COL_REGIONINFO);
     if (regioninfo == null) {
-      LOG.warn(COL_REGIONINFO.toString() + " is empty for row: " + row +
-          "; has keys: " + map.keySet().toString());
+      StringBuilder sb =  new StringBuilder();
+      for (byte [] e: map.keySet()) {
+        if (sb.length() > 0) {
+          sb.append(", ");
+        }
+        sb.append(Bytes.toString(e));
+      }
+      LOG.warn(Bytes.toString(COL_REGIONINFO) + " is empty for row: " +
+         Bytes.toString(row) + "; has keys: " + sb.toString());
       return null;
     }
-    return (HRegionInfo)Writables.getWritable(regioninfo.getValue(), new HRegionInfo());
+    return Writables.getHRegionInfo(regioninfo.getValue());
   }
 
   /*
    * When we find rows in a meta region that has an empty HRegionInfo, we
    * clean them up here.
    * 
-   * @param server connection to server serving meta region
+   * @param s connection to server serving meta region
    * @param metaRegionName name of the meta region we scanned
    * @param emptyRows the row keys that had empty HRegionInfos
    */
-  protected void deleteEmptyMetaRows(HRegionInterface server, 
-      Text metaRegionName,
-      List<Text> emptyRows) {
-    for (Text regionName: emptyRows) {
+  protected void deleteEmptyMetaRows(HRegionInterface s, 
+      byte [] metaRegionName,
+      List<byte []> emptyRows) {
+    for (byte [] regionName: emptyRows) {
       try {
-        HRegion.removeRegionFromMETA(server, metaRegionName, regionName);
-        LOG.warn("Removed region: " + regionName + " from meta region: " +
-            metaRegionName + " because HRegionInfo was empty");
+        HRegion.removeRegionFromMETA(s, metaRegionName, regionName);
+        LOG.warn("Removed region: " + Bytes.toString(regionName) +
+          " from meta region: " +
+          Bytes.toString(metaRegionName) + " because HRegionInfo was empty");
       } catch (IOException e) {
         LOG.error("deleting region: " + regionName + " from meta region: " +
             metaRegionName, e);

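Much of the HMaster churn above follows from the same fact: byte [] has no value-based equals() and no readable toString(), so the table-exists check goes through Bytes.equals and every log message through Bytes.toString. A tiny sketch, with an invented table name:

    import org.apache.hadoop.hbase.util.Bytes;

    public class TableNameCompare {
      public static void main(String[] args) {
        byte[] a = Bytes.toBytes("mytable");
        byte[] b = Bytes.toBytes("mytable");
        System.out.println(a.equals(b));         // false: identity comparison
        System.out.println(Bytes.equals(a, b));  // true: content comparison
        System.out.println(a);                   // [B@... : useless in logs
        System.out.println(Bytes.toString(a));   // mytable
      }
    }
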
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaRegion.java Thu May 15 15:10:47 2008
@@ -19,43 +19,44 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.util.Bytes;
 
 
 /** Describes a meta region and its server */
 public class MetaRegion implements Comparable<MetaRegion> {
   private final HServerAddress server;
-  private final Text regionName;
-  private final Text startKey;
+  private final byte [] regionName;
+  private final byte [] startKey;
 
-  MetaRegion(final HServerAddress server, final Text regionName, 
-    final Text startKey) {
+  MetaRegion(final HServerAddress server, final byte [] regionName) {
+    this (server, regionName, HConstants.EMPTY_START_ROW);
+  }
+
+  MetaRegion(final HServerAddress server, final byte [] regionName,
+      final byte [] startKey) {
     if (server == null) {
       throw new IllegalArgumentException("server cannot be null");
     }
     this.server = server;
-    
     if (regionName == null) {
       throw new IllegalArgumentException("regionName cannot be null");
     }
-    this.regionName = new Text(regionName);
-    
-    this.startKey = new Text();
-    if (startKey != null) {
-      this.startKey.set(startKey);
-    }
+    this.regionName = regionName;
+    this.startKey = startKey;
   }
   
   /** {@inheritDoc} */
   @Override
   public String toString() {
-    return "{regionname: " + this.regionName.toString() + ", startKey: <" +
-      this.startKey.toString() + ">, server: " + this.server.toString() + "}";
+    return "{regionname: " + Bytes.toString(this.regionName) +
+      ", startKey: <" + Bytes.toString(this.startKey) +
+      ">, server: " + this.server.toString() + "}";
   }
 
   /** @return the regionName */
-  public Text getRegionName() {
+  public byte [] getRegionName() {
     return regionName;
   }
 
@@ -65,7 +66,7 @@
   }
 
   /** @return the startKey */
-  public Text getStartKey() {
+  public byte [] getStartKey() {
     return startKey;
   }
 
@@ -87,9 +88,9 @@
 
   /** {@inheritDoc} */
   public int compareTo(MetaRegion other) {
-    int result = this.regionName.compareTo(other.getRegionName());
+    int result = Bytes.compareTo(this.regionName, other.getRegionName());
     if(result == 0) {
-      result = this.startKey.compareTo(other.getStartKey());
+      result = Bytes.compareTo(this.startKey, other.getStartKey());
       if (result == 0) {
         // Might be on different host?
         result = this.server.compareTo(other.server);
@@ -97,5 +98,4 @@
     }
     return result;
   }
-}
-
+}
\ No newline at end of file

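MetaRegion.compareTo now chains Bytes.compareTo on region name, then start key, then falls back to the server address, preserving the lexicographic order the old Text comparisons gave. A sketch of that ordering, using invented region names:

    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionNameOrder {
      public static void main(String[] args) {
        byte[] first = Bytes.toBytes(".META.,aaa,1");
        byte[] second = Bytes.toBytes(".META.,bbb,1");
        // negative result: first sorts before second, byte by byte
        System.out.println(Bytes.compareTo(first, second) < 0);  // true
      }
    }
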
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaScanner.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/MetaScanner.java Thu May 15 15:10:47 2008
@@ -97,7 +97,7 @@
     MetaRegion region = null;
     while (!master.closed.get() &&
         (region == null && metaRegionsToScan.size() > 0) &&
-        !metaRegionsScanned()) {
+          !metaRegionsScanned()) {
       try {
         region = metaRegionsToScan.poll(master.threadWakeFrequency, 
           TimeUnit.MILLISECONDS);
@@ -146,11 +146,11 @@
    */
   synchronized boolean waitForMetaRegionsOrClose() {
     while (!master.closed.get()) {
-      if (regionManager.isInitialRootScanComplete() && 
-        regionManager.numMetaRegions() == regionManager.numOnlineMetaRegions()) {
+      if (regionManager.isInitialRootScanComplete() &&
+          regionManager.numMetaRegions() ==
+            regionManager.numOnlineMetaRegions()) {
         break;
       }
-
       try {
         wait(master.threadWakeFrequency);
       } catch (InterruptedException e) {
@@ -163,7 +163,7 @@
   /**
    * Add another meta region to scan to the queue.
    */ 
-  void addMetaRegionToScan(MetaRegion m) throws InterruptedException {
+  void addMetaRegionToScan(MetaRegion m) {
     metaRegionsToScan.add(m);
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ModifyColumn.java Thu May 15 15:10:47 2008
@@ -25,15 +25,14 @@
 import org.apache.hadoop.hbase.InvalidColumnNameException;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.io.Text;
 
 /** Instantiated to modify an existing column family on a table */
 class ModifyColumn extends ColumnOperation {
   private final HColumnDescriptor descriptor;
-  private final Text columnName;
+  private final byte [] columnName;
   
-  ModifyColumn(final HMaster master, final Text tableName, 
-    final Text columnName, HColumnDescriptor descriptor) 
+  ModifyColumn(final HMaster master, final byte [] tableName, 
+    final byte [] columnName, HColumnDescriptor descriptor) 
   throws IOException {
     super(master, tableName);
     this.descriptor = descriptor;
@@ -44,18 +43,13 @@
   protected void postProcessMeta(MetaRegion m, HRegionInterface server)
   throws IOException {
     for (HRegionInfo i: unservedRegions) {
-      // get the column families map from the table descriptor
-      Map<Text, HColumnDescriptor> families = i.getTableDesc().families();
-      
-      // if the table already has this column, then put the new descriptor 
-      // version.
-      if (families.get(columnName) != null){
-        families.put(columnName, descriptor);
-        updateRegionInfo(server, m.getRegionName(), i);          
-      } else{ // otherwise, we have an error.
+      if (i.getTableDesc().hasFamily(columnName)) {
+        i.getTableDesc().addFamily(descriptor);
+        updateRegionInfo(server, m.getRegionName(), i);
+      } else { // otherwise, we have an error.
         throw new InvalidColumnNameException("Column family '" + columnName + 
           "' doesn't exist, so cannot be modified.");
       }
     }
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java Thu May 15 15:10:47 2008
@@ -21,11 +21,11 @@
 
 import java.io.IOException;
 
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /** 
  * ProcessRegionOpen is instantiated when a region server reports that it is
@@ -42,12 +42,13 @@
    * @param regionInfo
    * @throws IOException
    */
+  @SuppressWarnings("unused")
   public ProcessRegionOpen(HMaster master, HServerInfo info, 
     HRegionInfo regionInfo)
   throws IOException {
     super(master, regionInfo);
     this.serverAddress = info.getServerAddress();
-    this.startCode = Writables.longToBytes(info.getStartCode());
+    this.startCode = Bytes.toBytes(info.getStartCode());
   }
 
   /** {@inheritDoc} */
@@ -61,8 +62,8 @@
     Boolean result =
       new RetryableMetaOperation<Boolean>(this.metaRegion, this.master) {
         public Boolean call() throws IOException {
-          LOG.info(regionInfo.toString() + " open on " + serverAddress.toString());
-
+          LOG.info(regionInfo.getRegionNameAsString() + " open on " +
+            serverAddress.toString());
           if (!metaRegionAvailable()) {
             // We can't proceed unless the meta region we are going to update
             // is online. metaRegionAvailable() has put this operation on the
@@ -72,14 +73,12 @@
           }
 
           // Register the newly-available Region's location.
-
-          LOG.info("updating row " + regionInfo.getRegionName() + " in table " +
-              metaRegionName + " with startcode " +
-              Writables.bytesToLong(startCode) + " and server " +
+          LOG.info("updating row " + regionInfo.getRegionNameAsString() +
+              " in region " + Bytes.toString(metaRegionName) +
+              " with startcode " + Bytes.toLong(startCode) + " and server " +
               serverAddress.toString());
-
           BatchUpdate b = new BatchUpdate(regionInfo.getRegionName());
-          b.put(COL_SERVER, Writables.stringToBytes(serverAddress.toString()));
+          b.put(COL_SERVER, Bytes.toBytes(serverAddress.toString()));
           b.put(COL_STARTCODE, startCode);
           server.batchUpdate(metaRegionName, b);
           if (isMetaTable) {

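The startcode previously round-tripped through Writables.longToBytes/bytesToLong and now uses the Bytes equivalents, which encode a long as exactly eight bytes. A minimal demonstration:

    import org.apache.hadoop.hbase.util.Bytes;

    public class StartCodeRoundTrip {
      public static void main(String[] args) {
        long startCode = System.currentTimeMillis();
        byte[] raw = Bytes.toBytes(startCode);               // 8-byte encoding
        System.out.println(raw.length);                      // 8
        System.out.println(Bytes.toLong(raw) == startCode);  // true
      }
    }
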
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java Thu May 15 15:10:47 2008
@@ -21,7 +21,6 @@
 
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.io.Text;
 
 /**
  * Abstract class that performs common operations for 
@@ -31,7 +30,7 @@
   protected final boolean isMetaTable;
   protected final HRegionInfo regionInfo;
   protected final MetaRegion metaRegion;
-  protected final Text metaRegionName;
+  protected final byte [] metaRegionName;
   
   /**
    * @param master
@@ -42,7 +41,7 @@
     this.regionInfo = regionInfo;
     this.isMetaTable = regionInfo.isMetaTable();
     if (isMetaTable) {
-      this.metaRegionName = HRegionInfo.rootRegionInfo.getRegionName();
+      this.metaRegionName = HRegionInfo.ROOT_REGIONINFO.getRegionName();
       this.metaRegion = new MetaRegion(master.getRootRegionLocation(),
           this.metaRegionName, HConstants.EMPTY_START_ROW);
     } else {
@@ -61,7 +60,8 @@
         available = false;
       }
     } else {
-      if (!master.regionManager.isInitialRootScanComplete() || !metaTableAvailable()) {
+      if (!master.regionManager.isInitialRootScanComplete() ||
+          !metaTableAvailable()) {
         // The root region has not been scanned or the meta table is not
         // available so we can't proceed.
         // Put the operation on the delayedToDoQueue
@@ -71,4 +71,4 @@
     }
     return available;
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java Thu May 15 15:10:47 2008
@@ -36,7 +36,6 @@
 import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.io.RowResult;
 
 /** 
@@ -53,10 +52,10 @@
 
   private class ToDoEntry {
     boolean regionOffline;
-    Text row;
-    HRegionInfo info;
+    final byte [] row;
+    final HRegionInfo info;
 
-    ToDoEntry(Text row, HRegionInfo info) {
+    ToDoEntry(final byte [] row, final HRegionInfo info) {
       this.regionOffline = false;
       this.row = row;
       this.info = info;
@@ -90,11 +89,11 @@
 
   /** Finds regions that the dead region server was serving */
   protected void scanMetaRegion(HRegionInterface server, long scannerId,
-      Text regionName) throws IOException {
+      byte [] regionName) throws IOException {
 
     List<ToDoEntry> toDoList = new ArrayList<ToDoEntry>();
     Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
-    List<Text> emptyRows = new ArrayList<Text>();
+    List<byte []> emptyRows = new ArrayList<byte []>();
     try {
       while (true) {
         RowResult values = null;
@@ -109,7 +108,7 @@
           break;
         }
         
-        Text row = values.getRow();
+        byte [] row = values.getRow();
         
         if (LOG.isDebugEnabled() && row != null) {
           LOG.debug("shutdown scanner looking at " + row.toString());
@@ -118,13 +117,7 @@
         // Check server name.  If null, be conservative and treat as though
         // region had been on shutdown server (could be null because we
         // missed edits in hlog because hdfs does not do write-append).
-        String serverName;
-        try {
-          serverName = Writables.cellToString(values.get(COL_SERVER));
-        } catch (UnsupportedEncodingException e) {
-          LOG.error("Server name", e);
-          break;
-        }
+        String serverName = Writables.cellToString(values.get(COL_SERVER));
         if (serverName.length() > 0 &&
             deadServerName.compareTo(serverName) != 0) {
           // This isn't the server you're looking for - move along
@@ -205,10 +198,10 @@
             master.getRootRegionLocation().getBindAddress());
       }
       long scannerId = server.openScanner(
-          HRegionInfo.rootRegionInfo.getRegionName(), COLUMN_FAMILY_ARRAY,
+          HRegionInfo.ROOT_REGIONINFO.getRegionName(), COLUMN_FAMILY_ARRAY,
           EMPTY_START_ROW, HConstants.LATEST_TIMESTAMP, null);
       scanMetaRegion(server, scannerId,
-          HRegionInfo.rootRegionInfo.getRegionName());
+          HRegionInfo.ROOT_REGIONINFO.getRegionName());
       return true;
     }
   }
@@ -268,7 +261,7 @@
       // Scan the ROOT region
       Boolean result = new ScanRootRegion(
           new MetaRegion(master.getRootRegionLocation(),
-              HRegionInfo.rootRegionInfo.getRegionName(),
+              HRegionInfo.ROOT_REGIONINFO.getRegionName(),
               HConstants.EMPTY_START_ROW), this.master).doWithRetries();
         
       if (result == null) {

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RegionManager.java Thu May 15 15:10:47 2008
@@ -24,17 +24,17 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.HashSet;
 import java.util.List;
 import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.Collections;
+import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.hadoop.io.Text;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.HMsg;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.util.Writables;
@@ -67,8 +68,9 @@
   private final AtomicInteger numberOfMetaRegions = new AtomicInteger();
 
   /** These are the online meta regions */
-  private final SortedMap<Text, MetaRegion> onlineMetaRegions =
-    Collections.synchronizedSortedMap(new TreeMap<Text, MetaRegion>());
+  private final SortedMap<byte [], MetaRegion> onlineMetaRegions =
+    Collections.synchronizedSortedMap(new TreeMap<byte [],
+      MetaRegion>(Bytes.BYTES_COMPARATOR));
 
   /**
    * The 'unassignedRegions' table maps from a HRegionInfo to a timestamp that
@@ -90,25 +92,25 @@
    * Regions that have been assigned, and the server has reported that it has
    * started serving it, but that we have not yet recorded in the meta table.
    */
-  private final Set<Text> pendingRegions =
-    Collections.synchronizedSet(new HashSet<Text>());
+  private final Set<byte []> pendingRegions =
+    Collections.synchronizedSet(new TreeSet<byte []>(Bytes.BYTES_COMPARATOR));
 
   /**
    * List of regions that are going to be closed.
    */
-  private final Map<String, Map<Text, HRegionInfo>> regionsToClose =
-    new ConcurrentHashMap<String, Map<Text, HRegionInfo>>();
+  private final Map<String, Map<byte [], HRegionInfo>> regionsToClose =
+    new ConcurrentHashMap<String, Map<byte [], HRegionInfo>>();
 
   /** Regions that are in the process of being closed */
-  private final Set<Text> closingRegions =
-    Collections.synchronizedSet(new HashSet<Text>());
+  private final Set<byte []> closingRegions =
+    Collections.synchronizedSet(new TreeSet<byte []>(Bytes.BYTES_COMPARATOR));
 
   /**
    * Set of regions that, once closed, should be marked as offline so that they
    * are not reassigned.
    */
-  private final Set<Text> regionsToOffline = 
-    Collections.synchronizedSet(new HashSet<Text>());
+  private final Set<byte []> regionsToOffline = 
+    Collections.synchronizedSet(new TreeSet<byte []>(Bytes.BYTES_COMPARATOR));
   // How many regions to assign a server at a time.
   private final int maxAssignInOneGo;
 
@@ -147,7 +149,7 @@
   void unassignRootRegion() {
     rootRegionLocation.set(null);
     if (!master.shutdownRequested) {
-      unassignedRegions.put(HRegionInfo.rootRegionInfo, ZERO_L);
+      unassignedRegions.put(HRegionInfo.ROOT_REGIONINFO, ZERO_L);
     }
   }
   
@@ -161,9 +163,7 @@
    */
   void assignRegions(HServerInfo info, String serverName,
     HRegionInfo[] mostLoadedRegions, ArrayList<HMsg> returnMsgs) {
-    
     HServerLoad thisServersLoad = info.getLoad();
-    
     synchronized (unassignedRegions) {
       // We need to hold a lock on assign attempts while we figure out what to
       // do so that multiple threads do not execute this method in parallel
@@ -172,7 +172,6 @@
       // figure out what regions need to be assigned and aren't currently being
       // worked on elsewhere.
       Set<HRegionInfo> regionsToAssign = regionsAwaitingAssignment();
-
       if (regionsToAssign.size() == 0) {
         // There are no regions waiting to be assigned. This is an opportunity
         // for us to check if this server is overloaded. 
@@ -252,8 +251,9 @@
       
       long now = System.currentTimeMillis();
       for (HRegionInfo regionInfo: regionsToAssign) {
-        LOG.info("assigning region " + regionInfo.getRegionName() +
-            " to server " + serverName);
+        LOG.info("assigning region " +
+          Bytes.toString(regionInfo.getRegionName())+
+          " to server " + serverName);
         unassignedRegions.put(regionInfo, Long.valueOf(now));
         returnMsgs.add(new HMsg(HMsg.MSG_REGION_OPEN, regionInfo));
         if (--nregions <= 0) {
@@ -376,7 +376,8 @@
       final String serverName, final ArrayList<HMsg> returnMsgs) {
     long now = System.currentTimeMillis();
     for (HRegionInfo regionInfo: regionsToAssign) {
-      LOG.info("assigning region " + regionInfo.getRegionName() +
+      LOG.info("assigning region " +
+          Bytes.toString(regionInfo.getRegionName()) +
           " to the only server " + serverName);
       unassignedRegions.put(regionInfo, Long.valueOf(now));
       returnMsgs.add(new HMsg(HMsg.MSG_REGION_OPEN, regionInfo));
@@ -428,9 +429,9 @@
   /**
    * @return Read-only map of online regions.
    */
-  public Map<Text, MetaRegion> getOnlineMetaRegions() {
+  public Map<byte [], MetaRegion> getOnlineMetaRegions() {
     synchronized (onlineMetaRegions) {
-      return new TreeMap<Text, MetaRegion>(onlineMetaRegions);
+      return Collections.unmodifiableMap(onlineMetaRegions);
     }
   }
   
@@ -505,8 +506,8 @@
    * @param tableName Table you need to know all the meta regions for
    * @return set of MetaRegion objects that contain the table
    */
-  public Set<MetaRegion> getMetaRegionsForTable(Text tableName) {
-    Text firstMetaRegion = null;
+  public Set<MetaRegion> getMetaRegionsForTable(byte [] tableName) {
+    byte [] firstMetaRegion = null;
     Set<MetaRegion> metaRegions = new HashSet<MetaRegion>();
     
     synchronized (onlineMetaRegions) {
@@ -533,7 +534,7 @@
    * @throws IOException
    */
   public void createRegion(HRegionInfo newRegion, HRegionInterface server, 
-    Text metaRegionName) 
+      byte [] metaRegionName) 
   throws IOException {
     // 2. Create the HRegion
     HRegion region = 
@@ -541,7 +542,7 @@
 
     // 3. Insert into meta
     HRegionInfo info = region.getRegionInfo();
-    Text regionName = region.getRegionName();
+    byte [] regionName = region.getRegionName();
     BatchUpdate b = new BatchUpdate(regionName);
     b.put(COL_REGIONINFO, Writables.getBytes(info));
     server.batchUpdate(metaRegionName, b);
@@ -587,7 +588,7 @@
    * @param startKey name of the meta region to check
    * @return true if the region is online, false otherwise
    */
-  public boolean isMetaRegionOnline(Text startKey) {
+  public boolean isMetaRegionOnline(byte [] startKey) {
     return onlineMetaRegions.containsKey(startKey);
   }
   
@@ -595,7 +596,7 @@
    * Set an online MetaRegion offline - remove it from the map. 
    * @param startKey region name
    */
-  public void offlineMetaRegion(Text startKey) {
+  public void offlineMetaRegion(byte [] startKey) {
     onlineMetaRegions.remove(startKey); 
   }
   
@@ -615,7 +616,7 @@
    * @param regionName name of the region
    * @return true if pending, false otherwise
    */
-  public boolean isPending(Text regionName) {
+  public boolean isPending(byte [] regionName) {
     return pendingRegions.contains(regionName);
   }
   
@@ -636,7 +637,7 @@
    * Set a region to pending assignment 
    * @param regionName
    */
-  public void setPending(Text regionName) {
+  public void setPending(byte [] regionName) {
     pendingRegions.add(regionName);
   }
   
@@ -644,7 +645,7 @@
    * Unset region's pending status 
    * @param regionName 
    */
-  public void noLongerPending(Text regionName) {
+  public void noLongerPending(byte [] regionName) {
     pendingRegions.remove(regionName);
   }
   
@@ -679,7 +680,7 @@
    */
   public void markToClose(String serverName, HRegionInfo info) {
     synchronized (regionsToClose) {
-      Map<Text, HRegionInfo> serverToClose = regionsToClose.get(serverName);
+      Map<byte [], HRegionInfo> serverToClose = regionsToClose.get(serverName);
       if (serverToClose != null) {
         serverToClose.put(info.getRegionName(), info);
       }
@@ -691,10 +692,10 @@
    * @param serverName address info of server
    * @param map map of region names to region infos of regions to close
    */
-  public void markToCloseBulk(String serverName, 
-      Map<Text, HRegionInfo> map) {
+  public void markToCloseBulk(String serverName,
+      Map<byte [], HRegionInfo> map) {
     synchronized (regionsToClose) {
-      Map<Text, HRegionInfo> regions = regionsToClose.get(serverName);
+      Map<byte [], HRegionInfo> regions = regionsToClose.get(serverName);
       if (regions != null) {
         regions.putAll(map);
       } else {
@@ -711,7 +712,7 @@
    * @param serverName
    * @return map of region names to region infos to close
    */
-  public Map<Text, HRegionInfo> removeMarkedToClose(String serverName) {
+  public Map<byte [], HRegionInfo> removeMarkedToClose(String serverName) {
     return regionsToClose.remove(serverName);
   }
   
@@ -721,9 +722,9 @@
    * @param regionName name of the region we might want to close
    * @return true if the region is marked to close, false otherwise
    */
-  public boolean isMarkedToClose(String serverName, Text regionName) {
+  public boolean isMarkedToClose(String serverName, byte [] regionName) {
     synchronized (regionsToClose) {
-      Map<Text, HRegionInfo> serverToClose = regionsToClose.get(serverName);
+      Map<byte [], HRegionInfo> serverToClose = regionsToClose.get(serverName);
       return (serverToClose != null && serverToClose.containsKey(regionName));
     }
   }
@@ -734,9 +735,9 @@
    * @param serverName address info of server
    * @param regionName name of the region
    */
-  public void noLongerMarkedToClose(String serverName, Text regionName) {
+  public void noLongerMarkedToClose(String serverName, byte [] regionName) {
     synchronized (regionsToClose) {
-      Map<Text, HRegionInfo> serverToClose = regionsToClose.get(serverName);
+      Map<byte [], HRegionInfo> serverToClose = regionsToClose.get(serverName);
       if (serverToClose != null) {
         serverToClose.remove(regionName);
       }
@@ -757,7 +758,7 @@
    * @param regionName 
    * @return true if the region is marked as closing, false otherwise
    */
-  public boolean isClosing(Text regionName) {
+  public boolean isClosing(byte [] regionName) {
     return closingRegions.contains(regionName);
   }
 
@@ -765,7 +766,7 @@
    * Set a region as no longer closing (closed?) 
    * @param regionName
    */
-  public void noLongerClosing(Text regionName) {
+  public void noLongerClosing(byte [] regionName) {
     closingRegions.remove(regionName);
   }
   
@@ -773,7 +774,7 @@
    * Mark a region as closing 
    * @param regionName
    */
-  public void setClosing(Text regionName) {
+  public void setClosing(byte [] regionName) {
     closingRegions.add(regionName);
   }
   
@@ -790,7 +791,7 @@
    * Note that a region should be offlined as soon as its closed. 
    * @param regionName
    */
-  public void markRegionForOffline(Text regionName) {
+  public void markRegionForOffline(byte [] regionName) {
     regionsToOffline.add(regionName);
   }
   
@@ -799,7 +800,7 @@
    * @param regionName
    * @return true if marked for offline, false otherwise
    */
-  public boolean isMarkedForOffline(Text regionName) {
+  public boolean isMarkedForOffline(byte [] regionName) {
     return regionsToOffline.contains(regionName);
   }
   
@@ -807,7 +808,7 @@
    * Region was offlined as planned, remove it from the list to offline 
    * @param regionName
    */
-  public void regionOfflined(Text regionName) {
+  public void regionOfflined(byte [] regionName) {
     regionsToOffline.remove(regionName);
   }
   

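One behavioral change in RegionManager above is easy to miss: getOnlineMetaRegions() used to return a fresh TreeMap copy, but now wraps the live map in Collections.unmodifiableMap, so callers get a read-only view that tracks later updates instead of a point-in-time snapshot. A sketch (map contents invented):

    import java.util.Collections;
    import java.util.Map;
    import java.util.SortedMap;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadOnlyView {
      public static void main(String[] args) {
        SortedMap<byte[], String> online = Collections.synchronizedSortedMap(
            new TreeMap<byte[], String>(Bytes.BYTES_COMPARATOR));
        online.put(Bytes.toBytes(".META.,,1"), "server-a");
        Map<byte[], String> view = Collections.unmodifiableMap(online);
        try {
          view.put(Bytes.toBytes(".META.,aaa,2"), "server-b");
        } catch (UnsupportedOperationException e) {
          System.out.println("view is read-only");  // printed
        }
        online.put(Bytes.toBytes(".META.,aaa,2"), "server-b");
        System.out.println(view.size());  // 2: the view tracks the live map
      }
    }
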
Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RootScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RootScanner.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RootScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/RootScanner.java Thu May 15 15:10:47 2008
@@ -44,8 +44,10 @@
     try {
       // Don't interrupt us while we're working
       synchronized(scannerLock) {
-        scanRegion(new MetaRegion(master.getRootRegionLocation(),
-          HRegionInfo.rootRegionInfo.getRegionName(), null));
+        if (master.getRootRegionLocation() != null) {
+          scanRegion(new MetaRegion(master.getRootRegionLocation(),
+            HRegionInfo.ROOT_REGIONINFO.getRegionName()));
+        }
       }
       scanSuccessful = true;
     } catch (IOException e) {
@@ -71,4 +73,4 @@
   protected void maintenanceScan() {
     scanRoot();
   }
-}
+}
\ No newline at end of file

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/ServerManager.java Thu May 15 15:10:47 2008
@@ -42,7 +42,6 @@
 import org.apache.hadoop.hbase.Leases;
 import org.apache.hadoop.hbase.LeaseListener;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.io.Text;
 
 /**
  * The ServerManager class manages info about region servers - HServerInfo, 
@@ -153,11 +152,10 @@
    * 
    * @throws IOException
    */
-  public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[], 
+  public HMsg [] regionServerReport(HServerInfo serverInfo, HMsg msgs[], 
     HRegionInfo[] mostLoadedRegions)
   throws IOException {
     String serverName = serverInfo.getServerAddress().toString().trim();
-
     if (msgs.length > 0) {
       if (msgs[0].getMsg() == HMsg.MSG_REPORT_EXITING) {
         processRegionServerExit(serverName, msgs);
@@ -183,7 +181,7 @@
           return new HMsg[0];
         }
         // Tell the server to stop serving any user regions
-        return new HMsg[]{new HMsg(HMsg.MSG_REGIONSERVER_QUIESCE)};
+        return new HMsg [] {new HMsg(HMsg.MSG_REGIONSERVER_QUIESCE)};
       }
     }
 
@@ -191,7 +189,7 @@
       // Tell server to shut down if we are shutting down.  This should
       // happen after check of MSG_REPORT_EXITING above, since region server
       // will send us one of these messages after it gets MSG_REGIONSERVER_STOP
-      return new HMsg[]{new HMsg(HMsg.MSG_REGIONSERVER_STOP)};
+      return new HMsg [] {new HMsg(HMsg.MSG_REGIONSERVER_STOP)};
     }
 
     HServerInfo storedInfo = serversToServerInfo.get(serverName);
@@ -325,14 +323,13 @@
     HRegionInfo[] mostLoadedRegions, HMsg incomingMsgs[])
   throws IOException { 
     ArrayList<HMsg> returnMsgs = new ArrayList<HMsg>();
-    Map<Text, HRegionInfo> regionsToKill = 
+    Map<byte [], HRegionInfo> regionsToKill = 
       master.regionManager.removeMarkedToClose(serverName);
 
     // Get reports on what the RegionServer did.
     for (int i = 0; i < incomingMsgs.length; i++) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Received " + incomingMsgs[i].toString() + " from " +
-            serverName);
+        LOG.debug("Received " + incomingMsgs[i] + " from " + serverName);
       }
       HRegionInfo region = incomingMsgs[i].getRegionInfo();
 
@@ -369,7 +366,6 @@
         master.regionManager.setClosing(i.getRegionName());
       }
     }
-
     // Figure out what the RegionServer ought to do, and write back.
     master.regionManager.assignRegions(serverInfo, serverName, 
       mostLoadedRegions, returnMsgs);
@@ -410,7 +406,6 @@
     HRegionInfo region, ArrayList<HMsg> returnMsgs) 
   throws IOException {
     boolean duplicateAssignment = false;
-    
     if (!master.regionManager.isUnassigned(region)) {
       if (region.isRootRegion()) {
         // Root region
@@ -448,9 +443,6 @@
       // and then try to reopen it elsewhere; that's not what we want.
       returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_WITHOUT_REPORT, region));
     } else {
-      LOG.info(serverInfo.getServerAddress().toString() + " serving " +
-        region.getRegionName());
-
       // it was assigned, and it's not a duplicate assignment, so take it out 
       // of the unassigned list.
       master.regionManager.noLongerUnassigned(region);
@@ -462,7 +454,6 @@
         // Note that the table has been assigned and is waiting for the
         // meta table to be updated.
         master.regionManager.setPending(region.getRegionName());
-
         // Queue up an update to note the region location.
         try {
           master.toDoQueue.put(
@@ -658,7 +649,9 @@
           serversToServerInfo.values());
         try {
           serversToServerInfo.wait(master.threadWakeFrequency);
-        } catch (InterruptedException e) {}
+        } catch (InterruptedException e) {
+          // continue
+        }
       }
     }
   }
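
The wait in the hunk above now documents that an InterruptedException simply
falls through to re-check the loop condition. A condensed, hypothetical
illustration of that guarded-wait idiom (the class, field, and parameter
names are ours, not the source's):

    import java.util.ArrayList;
    import java.util.List;

    public class GuardedWaitExample {
      private final List<String> servers = new ArrayList<String>();

      // Blocks until all servers have reported in; waking periodically
      // bounds how long a missed notify or an interrupt can stall us.
      public void waitForEmpty(long wakeFrequency) {
        synchronized (servers) {
          while (!servers.isEmpty()) {
            try {
              servers.wait(wakeFrequency);
            } catch (InterruptedException e) {
              // fall through and re-check the loop condition
            }
          }
        }
      }
    }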

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableDelete.java Thu May 15 15:10:47 2008
@@ -26,7 +26,6 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.io.Text;
 
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
@@ -36,7 +35,7 @@
  */
 class TableDelete extends TableOperation {
 
-  TableDelete(final HMaster master, final Text tableName) throws IOException {
+  TableDelete(final HMaster master, final byte [] tableName) throws IOException {
     super(master, tableName);
   }
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/master/TableOperation.java Thu May 15 15:10:47 2008
@@ -33,9 +33,9 @@
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Sleeper;
 
@@ -50,13 +50,13 @@
   protected static final Log LOG = LogFactory.getLog(TableOperation.class);
   
   protected Set<MetaRegion> metaRegions;
-  protected Text tableName;
+  protected byte [] tableName;
   protected Set<HRegionInfo> unservedRegions;
   protected HMaster master;
   protected final int numRetries;
   protected final Sleeper sleeper;
   
-  protected TableOperation(final HMaster master, final Text tableName) 
+  protected TableOperation(final HMaster master, final byte [] tableName) 
   throws IOException {
     this.sleeper = master.sleeper;
     this.numRetries = master.numRetries;
@@ -94,7 +94,7 @@
       long scannerId = server.openScanner(m.getRegionName(),
           COLUMN_FAMILY_ARRAY, tableName, HConstants.LATEST_TIMESTAMP, null);
 
-      List<Text> emptyRows = new ArrayList<Text>();
+      List<byte []> emptyRows = new ArrayList<byte []>();
       try {
         while (true) {
           RowResult values = server.next(scannerId);
@@ -109,7 +109,7 @@
           }
           String serverName = Writables.cellToString(values.get(COL_SERVER));
           long startCode = Writables.cellToLong(values.get(COL_STARTCODE));
-          if (info.getTableDesc().getName().compareTo(tableName) > 0) {
+          if (Bytes.compareTo(info.getTableDesc().getName(), tableName) > 0) {
             break; // Beyond any more entries for this table
           }
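
The early exit above relies on META rows sorting by table name first, so the
first entry whose table name compares greater than the target marks the end
of that table's regions. A small sketch of the comparison, assuming only that
Bytes.compareTo orders byte arrays lexicographically (table names are
illustrative):

    import org.apache.hadoop.hbase.util.Bytes;

    public class TableBoundaryExample {
      public static void main(String[] args) {
        byte [] target = Bytes.toBytes("mytable");
        // Still inside the target table's entries:
        System.out.println(
          Bytes.compareTo(Bytes.toBytes("mytable"), target) > 0); // false
        // First entry past the table; the scan loop breaks here:
        System.out.println(
          Bytes.compareTo(Bytes.toBytes("zebra"), target) > 0);   // true
      }
    }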
 

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java Thu May 15 15:10:47 2008
@@ -26,7 +26,6 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
 
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,6 +35,7 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
 
 /** 
@@ -83,7 +83,7 @@
           lock.lock();
           try {
             // Don't interrupt us while we are working
-            Text midKey = r.compactStores();
+            byte [] midKey = r.compactStores();
             if (midKey != null) {
               split(r, midKey);
             }
@@ -119,7 +119,8 @@
    * @param r HRegion store belongs to
    */
   public synchronized void compactionRequested(HRegion r) {
-    LOG.debug("Compaction requested for region: " + r.getRegionName());
+    LOG.debug("Compaction requested for region: " +
+      Bytes.toString(r.getRegionName()));
     synchronized (regionsInQueue) {
       if (!regionsInQueue.contains(r)) {
         compactionQueue.add(r);
@@ -128,7 +129,7 @@
     }
   }
   
-  private void split(final HRegion region, final Text midKey)
+  private void split(final HRegion region, final byte [] midKey)
   throws IOException {
     final HRegionInfo oldRegionInfo = region.getRegionInfo();
     final HRegion[] newRegions = region.splitRegion(this, midKey);
@@ -190,11 +191,13 @@
   }
   
   /** {@inheritDoc} */
-  public void closing(@SuppressWarnings("unused") final Text regionName) {
+  public void closing(@SuppressWarnings("unused") final byte [] regionName) {
+    // do nothing
   }
   
   /** {@inheritDoc} */
-  public void closed(@SuppressWarnings("unused") final Text regionName) {
+  public void closed(@SuppressWarnings("unused") final byte [] regionName) {
+    // do nothing
   }
 
   /**
@@ -205,4 +208,4 @@
       this.interrupt();
     }
   }
-}
+}
\ No newline at end of file
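
The worker loop above encodes the compact-then-maybe-split contract:
compactStores() now hands back the optimal split point as a byte [] mid key,
or null when no split is warranted. A hypothetical stand-in that mimics the
shape of that contract (the method body and thresholds below are
illustrative, not HRegion's actual logic):

    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitDecisionExample {
      // Illustrative stand-in for HRegion.compactStores(): return the mid
      // key of the store when it exceeds the size threshold, else null.
      static byte [] compactStores(long storeSize, long maxSize,
          byte [] midKey) {
        return storeSize > maxSize ? midKey : null;
      }

      public static void main(String[] args) {
        byte [] midKey =
          compactStores(512L, 256L, Bytes.toBytes("row5000"));
        if (midKey != null) {
          System.out.println("split at " + Bytes.toString(midKey));
        }
      }
    }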

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HAbstractScanner.java Thu May 15 15:10:47 2008
@@ -20,9 +20,10 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.SortedMap;
-import java.util.TreeMap;
 import java.util.Vector;
 import java.util.Map.Entry;
 import java.util.regex.Pattern;
@@ -30,7 +31,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HStoreKey;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Abstract base class that implements the InternalScanner.
@@ -64,20 +65,22 @@
   private static class ColumnMatcher {
     private boolean wildCardmatch;
     private MATCH_TYPE matchType;
-    private Text family;
+    private byte [] family;
     private Pattern columnMatcher;
-    private Text col;
+    private byte [] col;
   
-    ColumnMatcher(final Text col) throws IOException {
-      Text qualifier = HStoreKey.extractQualifier(col);
+    ColumnMatcher(final byte [] col) throws IOException {
+      byte [][] parse = HStoreKey.parseColumn(col);
+      // First position has family.  Second has qualifier.
+      byte [] qualifier = parse[1];
       try {
-        if (qualifier == null || qualifier.getLength() == 0) {
+        if (qualifier == null || qualifier.length == 0) {
           this.matchType = MATCH_TYPE.FAMILY_ONLY;
-          this.family = HStoreKey.extractFamily(col).toText();
+          this.family = parse[0];
           this.wildCardmatch = true;
-        } else if(isRegexPattern.matcher(qualifier.toString()).matches()) {
+        } else if (isRegexPattern.matcher(Bytes.toString(qualifier)).matches()) {
           this.matchType = MATCH_TYPE.REGEX;
-          this.columnMatcher = Pattern.compile(col.toString());
+          this.columnMatcher = Pattern.compile(Bytes.toString(col));
           this.wildCardmatch = true;
         } else {
           this.matchType = MATCH_TYPE.SIMPLE;
@@ -85,18 +88,19 @@
           this.wildCardmatch = false;
         }
       } catch(Exception e) {
-        throw new IOException("Column: " + col + ": " + e.getMessage());
+        throw new IOException("Column: " + Bytes.toString(col) + ": " +
+          e.getMessage());
       }
     }
     
     /** Matching method */
-    boolean matches(Text c) throws IOException {
+    boolean matches(final byte [] c) throws IOException {
       if(this.matchType == MATCH_TYPE.SIMPLE) {
-        return c.equals(this.col);
+        return Bytes.equals(c, this.col);
       } else if(this.matchType == MATCH_TYPE.FAMILY_ONLY) {
-        return HStoreKey.extractFamily(c).equals(this.family);
-      } else if(this.matchType == MATCH_TYPE.REGEX) {
-        return this.columnMatcher.matcher(c.toString()).matches();
+        return HStoreKey.matchingFamily(this.family, c);
+      } else if (this.matchType == MATCH_TYPE.REGEX) {
+        return this.columnMatcher.matcher(Bytes.toString(c)).matches();
       } else {
         throw new IOException("Invalid match type: " + this.matchType);
       }
@@ -107,8 +111,10 @@
     }
   }
 
-  // Holds matchers for each column family 
-  protected TreeMap<Text, Vector<ColumnMatcher>> okCols;
+  // Holds matchers for each column family.  It's keyed by the byte []
+  // hashcode, which you can get by calling Bytes.mapKey.
+  private Map<Integer, Vector<ColumnMatcher>> okCols =
+    new HashMap<Integer, Vector<ColumnMatcher>>();
   
   // True when scanning is done
   protected volatile boolean scannerClosed = false;
@@ -120,14 +126,13 @@
   private boolean multipleMatchers;
 
   /** Constructor for abstract base class */
-  HAbstractScanner(long timestamp, Text[] targetCols) throws IOException {
+  HAbstractScanner(long timestamp, byte [][] targetCols) throws IOException {
     this.timestamp = timestamp;
     this.wildcardMatch = false;
     this.multipleMatchers = false;
-    this.okCols = new TreeMap<Text, Vector<ColumnMatcher>>();
     for(int i = 0; i < targetCols.length; i++) {
-      Text family = HStoreKey.extractFamily(targetCols[i]).toText();
-      Vector<ColumnMatcher> matchers = okCols.get(family);
+      Integer key = HStoreKey.getFamilyMapKey(targetCols[i]);
+      Vector<ColumnMatcher> matchers = okCols.get(key);
       if (matchers == null) {
         matchers = new Vector<ColumnMatcher>();
       }
@@ -139,7 +144,7 @@
       if (matchers.size() > 1) {
         this.multipleMatchers = true;
       }
-      okCols.put(family, matchers);
+      okCols.put(key, matchers);
     }
   }
 
@@ -154,14 +159,14 @@
    *                 
    * @throws IOException
    */
-  protected boolean columnMatch(final Text column) throws IOException {
+  protected boolean columnMatch(final byte [] column) throws IOException {
     Vector<ColumnMatcher> matchers =
-      this.okCols.get(HStoreKey.extractFamily(column));
+      this.okCols.get(HStoreKey.getFamilyMapKey(column));
     if (matchers == null) {
       return false;
     }
     for(int m = 0; m < matchers.size(); m++) {
-      if(matchers.get(m).matches(column)) {
+      if (matchers.get(m).matches(column)) {
         return true;
       }
     }
@@ -178,10 +183,11 @@
     return this.multipleMatchers;
   }
 
-  public abstract boolean next(HStoreKey key, SortedMap<Text, byte []> results)
+  public abstract boolean next(HStoreKey key,
+    SortedMap<byte [], byte []> results)
   throws IOException;
   
-  public Iterator<Entry<HStoreKey, SortedMap<Text, byte[]>>> iterator() {
+  public Iterator<Entry<HStoreKey, SortedMap<byte [], byte[]>>> iterator() {
     throw new UnsupportedOperationException("Unimplemented serverside. " +
       "next(HStoreKey, StortedMap(...) is more efficient");
   }
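
The matcher above distinguishes three target shapes: a bare family
("family:", handled via HStoreKey.matchingFamily), a regex over the full
column name, and a plain column compared byte for byte. A short sketch of the
simple and regex cases, using only helpers visible in this change (the column
names are illustrative):

    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ColumnMatchExample {
      public static void main(String[] args) {
        byte [] candidate = Bytes.toBytes("info:address");
        // SIMPLE: exact byte-for-byte comparison.
        System.out.println(
          Bytes.equals(candidate, Bytes.toBytes("info:name"))); // false
        // REGEX: the whole column name is matched against the pattern.
        System.out.println(Pattern.matches("info:addr.*",
          Bytes.toString(candidate)));                          // true
      }
    }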

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLog.java Thu May 15 15:10:47 2008
@@ -19,16 +19,14 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.io.FileNotFoundException;
 import java.io.EOFException;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -39,18 +37,18 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.SequenceFile.Reader;
-
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.SequenceFile.Reader;
 
 /**
  * HLog stores all the edits to the HStore.
@@ -97,8 +95,8 @@
 public class HLog implements HConstants {
   private static final Log LOG = LogFactory.getLog(HLog.class);
   private static final String HLOG_DATFILE = "hlog.dat.";
-  static final Text METACOLUMN = new Text("METACOLUMN:");
-  static final Text METAROW = new Text("METAROW");
+  static final byte [] METACOLUMN = Bytes.toBytes("METACOLUMN:");
+  static final byte [] METAROW = Bytes.toBytes("METAROW");
   final FileSystem fs;
   final Path dir;
   final Configuration conf;
@@ -120,7 +118,8 @@
   /*
    * Map of region to last sequence/edit id. 
    */
-  final Map<Text, Long> lastSeqWritten = new ConcurrentHashMap<Text, Long>();
+  private final Map<byte [], Long> lastSeqWritten = Collections.
+    synchronizedSortedMap(new TreeMap<byte [], Long>(Bytes.BYTES_COMPARATOR));
 
   private volatile boolean closed = false;
 
@@ -274,8 +273,8 @@
             // Now remove old log files (if any)
             if (LOG.isDebugEnabled()) {
               // Find region associated with oldest key -- helps debugging.
-              Text oldestRegion = null;
-              for (Map.Entry<Text, Long> e: this.lastSeqWritten.entrySet()) {
+              byte [] oldestRegion = null;
+              for (Map.Entry<byte [], Long> e: this.lastSeqWritten.entrySet()) {
                 if (e.getValue().longValue() == oldestOutstandingSeqNum.longValue()) {
                   oldestRegion = e.getKey();
                   break;
@@ -370,9 +369,9 @@
    * @param timestamp
    * @throws IOException
    */
-  void append(Text regionName, Text tableName,
-      TreeMap<HStoreKey, byte[]> edits) throws IOException {
-    
+  void append(byte [] regionName, byte [] tableName,
+      TreeMap<HStoreKey, byte[]> edits)
+  throws IOException {
     if (closed) {
       throw new IOException("Cannot append; log is closed");
     }
@@ -479,7 +478,7 @@
    * @param logSeqId
    * @throws IOException
    */
-  void completeCacheFlush(final Text regionName, final Text tableName,
+  void completeCacheFlush(final byte [] regionName, final byte [] tableName,
       final long logSeqId) throws IOException {
 
     try {
@@ -535,8 +534,8 @@
     }
     LOG.info("splitting " + logfiles.length + " log(s) in " +
       srcDir.toString());
-    Map<Text, SequenceFile.Writer> logWriters =
-      new HashMap<Text, SequenceFile.Writer>();
+    Map<byte [], SequenceFile.Writer> logWriters =
+      new TreeMap<byte [], SequenceFile.Writer>(Bytes.BYTES_COMPARATOR);
     try {
       for (int i = 0; i < logfiles.length; i++) {
         if (LOG.isDebugEnabled()) {
@@ -556,17 +555,15 @@
         try {
           int count = 0;
           for (; in.next(key, val); count++) {
-            Text tableName = key.getTablename();
-            Text regionName = key.getRegionName();
+            byte [] tableName = key.getTablename();
+            byte [] regionName = key.getRegionName();
             SequenceFile.Writer w = logWriters.get(regionName);
             if (w == null) {
               Path logfile = new Path(
-                  HRegion.getRegionDir(
-                      HTableDescriptor.getTableDir(rootDir, tableName),
-                      HRegionInfo.encodeRegionName(regionName)
-                  ),
-                  HREGION_OLDLOGFILE_NAME
-              );
+                HRegion.getRegionDir(
+                  HTableDescriptor.getTableDir(rootDir, tableName),
+                  HRegionInfo.encodeRegionName(regionName)),
+                HREGION_OLDLOGFILE_NAME);
               Path oldlogfile = null;
               SequenceFile.Reader old = null;
               if (fs.exists(logfile)) {
@@ -580,7 +577,7 @@
                 HLogEdit.class, getCompressionType(conf));
              // Use copy of regionName; the regionName object is reused inside
              // HStoreKey.getRegionName so its content changes as we iterate.
-              logWriters.put(new Text(regionName), w);
+              logWriters.put(regionName, w);
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Creating new log file writer for path " + logfile +
                   " and region " + regionName);

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogEdit.java Thu May 15 15:10:47 2008
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.*;
 
 import java.io.*;
@@ -62,7 +63,7 @@
     return (value == null)? false: deleteBytes.compareTo(value) == 0;
   }
 
-  private Text column = new Text();
+  private byte [] column;
   private byte [] val;
   private long timestamp;
   private static final int MAX_VALUE_LEN = 128;
@@ -76,18 +77,18 @@
 
   /**
    * Construct a fully initialized HLogEdit
-   * @param column column name
+   * @param c column name
    * @param bval value
    * @param timestamp timestamp for modification
    */
-  public HLogEdit(Text column, byte [] bval, long timestamp) {
-    this.column.set(column);
+  public HLogEdit(byte [] c, byte [] bval, long timestamp) {
+    this.column = c;
     this.val = bval;
     this.timestamp = timestamp;
   }
 
   /** @return the column */
-  public Text getColumn() {
+  public byte [] getColumn() {
     return this.column;
   }
 
@@ -116,7 +117,7 @@
     } catch (UnsupportedEncodingException e) {
       throw new RuntimeException("UTF8 encoding not present?", e);
     }
-    return "(" + getColumn().toString() + "/" + getTimestamp() + "/" +
+    return "(" + Bytes.toString(getColumn()) + "/" + getTimestamp() + "/" +
       value + ")";
   }
   
@@ -124,7 +125,7 @@
 
   /** {@inheritDoc} */
   public void write(DataOutput out) throws IOException {
-    this.column.write(out);
+    Bytes.writeByteArray(out, this.column);
     out.writeInt(this.val.length);
     out.write(this.val);
     out.writeLong(timestamp);
@@ -132,7 +133,7 @@
   
   /** {@inheritDoc} */
   public void readFields(DataInput in) throws IOException {
-    this.column.readFields(in);
+    this.column = Bytes.readByteArray(in);
     this.val = new byte[in.readInt()];
     in.readFully(this.val);
     this.timestamp = in.readLong();
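
HLogEdit's serialization now length-prefixes the column with
Bytes.writeByteArray so that readFields knows how many bytes to consume. A
minimal round-trip sketch, assuming only the Bytes helpers used in this
change:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ByteArrayRoundTrip {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        // Length-prefixed write, mirroring HLogEdit.write above.
        Bytes.writeByteArray(out, Bytes.toBytes("info:name"));
        DataInputStream in = new DataInputStream(
          new ByteArrayInputStream(buf.toByteArray()));
        // Symmetric read, mirroring HLogEdit.readFields.
        System.out.println(Bytes.toString(Bytes.readByteArray(in)));
      }
    }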

Modified: hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java?rev=656868&r1=656867&r2=656868&view=diff
==============================================================================
--- hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java (original)
+++ hadoop/hbase/trunk/src/java/org/apache/hadoop/hbase/regionserver/HLogKey.java Thu May 15 15:10:47 2008
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.*;
 
 import java.io.*;
@@ -31,14 +32,14 @@
  * also sorted.
  */
 public class HLogKey implements WritableComparable {
-  Text regionName = new Text();
-  Text tablename = new Text();
-  Text row = new Text();
-  long logSeqNum = 0L;
+  private byte [] regionName;
+  private byte [] tablename;
+  private byte [] row;
+  private long logSeqNum;
 
   /** Create an empty key useful when deserializing */
   public HLogKey() {
-    super();
+    this(null, null, null, 0L);
   }
   
   /**
@@ -51,11 +52,11 @@
    * @param row         - row key
    * @param logSeqNum   - log sequence number
    */
-  public HLogKey(Text regionName, Text tablename, Text row, long logSeqNum) {
-    // TODO: Is this copy of the instances necessary? They are expensive.
-    this.regionName.set(regionName);
-    this.tablename.set(tablename);
-    this.row.set(row);
+  public HLogKey(final byte [] regionName, final byte [] tablename,
+      final byte [] row, long logSeqNum) {
+    this.regionName = regionName;
+    this.tablename = tablename;
+    this.row = row;
     this.logSeqNum = logSeqNum;
   }
 
@@ -63,15 +64,15 @@
   // A bunch of accessors
   //////////////////////////////////////////////////////////////////////////////
 
-  Text getRegionName() {
+  byte [] getRegionName() {
     return regionName;
   }
   
-  Text getTablename() {
+  byte [] getTablename() {
     return tablename;
   }
   
-  Text getRow() {
+  byte [] getRow() {
     return row;
   }
   
@@ -84,7 +85,8 @@
    */
   @Override
   public String toString() {
-    return tablename + "/" + regionName + "/" + row + "/" + logSeqNum;
+    return Bytes.toString(tablename) + "/" + Bytes.toString(regionName) + "/" +
+      Bytes.toString(row) + "/" + logSeqNum;
   }
   
   /**
@@ -115,10 +117,10 @@
    */
   public int compareTo(Object o) {
     HLogKey other = (HLogKey) o;
-    int result = this.regionName.compareTo(other.regionName);
+    int result = Bytes.compareTo(this.regionName, other.regionName);
     
     if(result == 0) {
-      result = this.row.compareTo(other.row);
+      result = Bytes.compareTo(this.row, other.row);
       
       if(result == 0) {
         
@@ -141,9 +143,9 @@
    * {@inheritDoc}
    */
   public void write(DataOutput out) throws IOException {
-    this.regionName.write(out);
-    this.tablename.write(out);
-    this.row.write(out);
+    Bytes.writeByteArray(out, this.regionName);
+    Bytes.writeByteArray(out, this.tablename);
+    Bytes.writeByteArray(out, this.row);
     out.writeLong(logSeqNum);
   }
   
@@ -151,9 +153,9 @@
    * {@inheritDoc}
    */
   public void readFields(DataInput in) throws IOException {
-    this.regionName.readFields(in);
-    this.tablename.readFields(in);
-    this.row.readFields(in);
+    this.regionName = Bytes.readByteArray(in);
+    this.tablename = Bytes.readByteArray(in);
+    this.row = Bytes.readByteArray(in);
     this.logSeqNum = in.readLong();
   }
 }
\ No newline at end of file
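
HLogKey.compareTo above orders entries by region name, then row, then log
sequence number, so entries group by region and replay in write order. A
simplified stand-in for that comparison chain (the standalone method below is
illustrative, not the class's actual layout):

    import org.apache.hadoop.hbase.util.Bytes;

    public class HLogKeyOrderExample {
      static int compare(byte [] regionA, byte [] rowA, long seqA,
          byte [] regionB, byte [] rowB, long seqB) {
        int result = Bytes.compareTo(regionA, regionB);
        if (result == 0) {
          result = Bytes.compareTo(rowA, rowB);
          if (result == 0) {
            result = seqA < seqB? -1: (seqA > seqB? 1: 0);
          }
        }
        return result;
      }

      public static void main(String[] args) {
        byte [] region = Bytes.toBytes("regionA");
        byte [] row = Bytes.toBytes("row1");
        // Same region and row: ordering falls to the sequence number.
        System.out.println(compare(region, row, 1L, region, row, 2L)); // -1
      }
    }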