Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2007/05/29 19:17:46 UTC

svn commit: r542592 [2/2] - in /lucene/hadoop/trunk/src/contrib/hbase/src: java/org/apache/hadoop/hbase/ test/org/apache/hadoop/hbase/

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java Tue May 29 10:17:44 2007
@@ -24,6 +24,7 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.SortedMap;
+import java.util.SortedSet;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.TreeMap;
@@ -173,7 +174,9 @@
           }
 
           for (int i = 0; i < values.length; i++) {
-            results.put(values[i].getLabel(), values[i].getData().get());
+            byte[] bytes = new byte[values[i].getData().getSize()];
+            System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
+            results.put(values[i].getLabel(), bytes);
           }
 
           HRegionInfo info = getRegionInfo(COL_REGIONINFO, results, inbuf);
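
A minimal sketch of the defensive-copy idiom this hunk introduces, assuming the
old BytesWritable accessors shown in the diff (get() exposes the shared backing
array, getSize() the count of valid bytes). The backing array can be longer than
the valid data and may be reused between rows, so only an exact-length copy is
stable once the scanner moves on:

    import org.apache.hadoop.io.BytesWritable;

    // Copy out just the valid prefix of a BytesWritable's backing array.
    static byte[] copyOf(BytesWritable data) {
      byte[] bytes = new byte[data.getSize()];
      System.arraycopy(data.get(), 0, bytes, 0, bytes.length);
      return bytes;
    }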
@@ -220,29 +223,67 @@
     }
     
     protected String getServerName(final Text key,
-        final TreeMap<Text, byte[]> data)
-    throws UnsupportedEncodingException {
+        final TreeMap<Text, byte[]> data) {
+      
       byte [] bytes = data.get(key);
-      String name = (bytes != null && bytes.length != 0)?
-        new String(bytes, UTF8_ENCODING): null;
+      String name = null;
+      try {
+        name = (bytes != null && bytes.length != 0) ?
+            new String(bytes, UTF8_ENCODING): null;
+
+      } catch(UnsupportedEncodingException e) {
+        assert(false);
+      }
       return (name != null)? name.trim(): name;
     }
 
     protected long getStartCode(final Text key,
-        final TreeMap<Text, byte[]> data)
-    throws NumberFormatException, UnsupportedEncodingException {
+        final TreeMap<Text, byte[]> data) {
+
       long startCode = -1L;
       byte [] bytes = data.get(key);
       if(bytes != null && bytes.length != 0) {
-        startCode = Long.valueOf(new String(bytes, UTF8_ENCODING).trim());
+        try {
+          startCode = Long.valueOf(new String(bytes, UTF8_ENCODING).trim());
+          
+        } catch(NumberFormatException e) {
+          assert(false);
+          
+        } catch(UnsupportedEncodingException e) {
+          assert(false);
+        }
       }
       return startCode;
     }
     
     protected void checkAssigned(final HRegionInfo info,
         final String serverName, final long startCode) {
+
+      // Skip region - if ...
+      if(info.offLine                                           // offline
+          || killedRegions.contains(info.regionName)            // queued for offline
+          || regionsToDelete.contains(info.regionName)) {       // queued for delete
+
+        unassignedRegions.remove(info.regionName);
+        assignAttempts.remove(info.regionName);
+        
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("not assigning region: " + info.regionName);
+        }
+        return;
+      }
+      
       HServerInfo storedInfo = null;
       if(serverName != null) {
+        TreeMap<Text, HRegionInfo> regionsToKill = killList.get(serverName);
+        if(regionsToKill != null && regionsToKill.containsKey(info.regionName)) {
+          // Skip if region is on kill list
+          
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("not assigning region (on kill list): " + info.regionName);
+          }
+          return;
+        }
         storedInfo = serversToServerInfo.get(serverName);
       }
       if(storedInfo == null || storedInfo.getStartCode() != startCode) {
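
getServerName() and getStartCode() no longer declare UnsupportedEncodingException:
since UTF-8 support is mandatory on every Java platform, the checked exception is
unreachable in practice and is downgraded to an assertion. The pattern in
isolation, assuming UTF8_ENCODING is the usual "UTF-8":

    import java.io.UnsupportedEncodingException;

    // Decode-or-assert: the catch block cannot fire on a conforming JVM.
    static String decodeUtf8(byte[] bytes) {
      try {
        return new String(bytes, "UTF-8");
      } catch (UnsupportedEncodingException e) {
        assert false : "every JVM must support UTF-8";
        return null;
      }
    }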
@@ -310,10 +351,35 @@
   private Thread rootScannerThread;
   private Integer rootScannerLock = 0;
   
-  private static class MetaRegion {
+  private static class MetaRegion implements Comparable {
     public HServerAddress server;
     public Text regionName;
     public Text startKey;
+
+    @Override
+    public boolean equals(Object o) {
+      return this.compareTo(o) == 0;
+    }
+    
+    @Override
+    public int hashCode() {
+      int result = this.regionName.hashCode();
+      result ^= this.startKey.hashCode();
+      return result;
+    }
+
+    // Comparable
+    
+    public int compareTo(Object o) {
+      MetaRegion other = (MetaRegion)o;
+      
+      int result = this.regionName.compareTo(other.regionName);
+      if(result == 0) {
+        result = this.startKey.compareTo(other.startKey);
+      }
+      return result;
+    }
+    
   }
   
   /** Work for the meta scanner is queued up here */
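
MetaRegion now implements Comparable (with equals() and hashCode() kept consistent
with compareTo()) so instances can live in the TreeSet<MetaRegion> that the
TableOperation machinery further down relies on. The same contract with the
generic interface, which would avoid the raw Object cast, as a sketch rather than
part of this commit:

    import org.apache.hadoop.io.Text;

    private static class MetaRegion implements Comparable<MetaRegion> {
      Text regionName;
      Text startKey;

      // Order by region name, then by start key as the tie-breaker.
      public int compareTo(MetaRegion other) {
        int result = this.regionName.compareTo(other.regionName);
        return (result == 0) ? this.startKey.compareTo(other.startKey) : result;
      }
    }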
@@ -456,10 +522,21 @@
   
   private SortedMap<Text, Long> assignAttempts;
 
-  // 'killList' indicates regions that we hope to close and then never reopen 
-  // (because we're merging them, say).
+  // 'killList' indicates regions that we hope to close and not reopen 
+  // (because we're merging them, or taking the table offline, for example).
 
   private SortedMap<String, TreeMap<Text, HRegionInfo>> killList;
+  
+  // 'killedRegions' contains regions that are in the process of being closed
+  
+  private SortedSet<Text> killedRegions;
+  
+  // 'regionsToDelete' contains regions that need to be deleted, but cannot be
+  // deleted until the region server closes them
+  
+  private SortedSet<Text> regionsToDelete;
+  
+  // A map of known server names to server info
 
   private SortedMap<String, HServerInfo> serversToServerInfo =
     Collections.synchronizedSortedMap(new TreeMap<String, HServerInfo>());
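
killedRegions and regionsToDelete (declared above, constructed in a later hunk)
are Collections.synchronizedSortedSet wrappers. Individual calls such as add(),
remove(), and contains() are thread-safe, but iterating a synchronized wrapper
still requires holding the wrapper's own monitor, which matters for sets shared
between the report handler and the pending operations. A minimal sketch:

    import java.util.SortedSet;
    import org.apache.hadoop.io.Text;

    static void logKilledRegions(SortedSet<Text> killedRegions) {
      // add()/remove()/contains() need no extra locking; iteration does.
      synchronized (killedRegions) {
        for (Text region : killedRegions) {
          System.out.println("closing: " + region);
        }
      }
    }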
@@ -568,6 +645,12 @@
       Collections.synchronizedSortedMap(
           new TreeMap<String, TreeMap<Text, HRegionInfo>>());
     
+    this.killedRegions =
+      Collections.synchronizedSortedSet(new TreeSet<Text>());
+    
+    this.regionsToDelete =
+      Collections.synchronizedSortedSet(new TreeSet<Text>());
+    
     // We're almost open for business
     
     this.closed = false;
@@ -575,8 +658,9 @@
     LOG.info("HMaster initialized on " + address.toString());
   }
   
-  public boolean isMasterRunning() {
-    return !closed;
+  /** returns the HMaster server address */
+  public HServerAddress getMasterAddress() {
+    return address;
   }
 
   public void run() {
@@ -691,25 +775,30 @@
     }
   }
   
-  /** 
-   * Turn off the HMaster.  Sets a flag so that the main thread know to shut
-   * things down in an orderly fashion.
+  /**
+   * Wait until <code>rootRegionLocation</code> has been set or until the
+   * <code>closed</code> flag has been set.
+   * @return True if <code>rootRegionLocation</code> was populated.
    */
-  public void shutdown() {
-    TimerTask tt = new TimerTask() {
-      @Override
-      public void run() {
-        closed = true;
-        synchronized(msgQueue) {
-          msgQueue.clear();                         // Empty the queue
-          msgQueue.notifyAll();                     // Wake main thread
+  private synchronized boolean waitForRootRegionOrClose() {
+    while (!closed && rootRegionLocation == null) {
+      try {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wait for root region (or close)");
+        }
+        wait();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wake from wait for root region (or close)");
+        }
+      } catch(InterruptedException e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wake from wait for root region (or close) (IE)");
         }
       }
-    };
-    Timer t = new Timer("Shutdown");
-    t.schedule(tt, 10);
+    }
+    return this.rootRegionLocation == null;
   }
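
waitForRootRegionOrClose() (moved up from further down in the file) parks on the
HMaster monitor and only makes progress when another thread records the root
region's location and calls notifyAll() on the same monitor, as the context
around the removed copy below shows. A sketch of that producer side, with a
hypothetical method name:

    // Write under the monitor the waiters hold, then wake them.
    private synchronized void setRootRegionLocation(HServerAddress location) {
      this.rootRegionLocation = location;
      notifyAll();  // wakes waitForRootRegionOrClose() and waitForRootRegion()
    }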
-
+  
   //////////////////////////////////////////////////////////////////////////////
   // HMasterRegionInterface
   //////////////////////////////////////////////////////////////////////////////
@@ -748,10 +837,13 @@
   /** HRegionServers call this method repeatedly. */
   public HMsg[] regionServerReport(HServerInfo serverInfo, HMsg msgs[]) throws IOException {
     String server = serverInfo.getServerAddress().toString().trim();
+    Text serverLabel = new Text(server);
 
-    if (closed) {
-      // We're shutting down. Tell the server to go away.
+    if(closed
+        || (msgs.length == 1 && msgs[0].getMsg() == HMsg.MSG_REPORT_EXITING)) {
+      // We're shutting down. Or the HRegionServer is.
       serversToServerInfo.remove(server);
+      serverLeases.cancelLease(serverLabel, serverLabel);
       HMsg returnMsgs[] = {new HMsg(HMsg.MSG_REGIONSERVER_STOP)};
       return returnMsgs;
     }
@@ -797,7 +889,6 @@
       // This will always succeed; otherwise, the fetch of serversToServerInfo
       // would have failed above.
 
-      Text serverLabel = new Text(server);
       serverLeases.renewLease(serverLabel, serverLabel);
 
       // Refresh the info object
@@ -899,16 +990,21 @@
 
         } else {
           boolean reassignRegion = true;
+          boolean deleteRegion = false;
 
-          if(regionsToKill.containsKey(region.regionName)) {
-            regionsToKill.remove(region.regionName);
-            unassignedRegions.remove(region.regionName);
-            assignAttempts.remove(region.regionName);
+          if(killedRegions.remove(region.regionName)) {
             reassignRegion = false;
           }
+            
+          if(regionsToDelete.remove(region.regionName)) {
+            reassignRegion = false;
+            deleteRegion = true;
+          }
+          unassignedRegions.remove(region.regionName);
+          assignAttempts.remove(region.regionName);
 
           synchronized(msgQueue) {
-            msgQueue.add(new PendingCloseReport(region, reassignRegion));
+            msgQueue.add(new PendingCloseReport(region, reassignRegion, deleteRegion));
             msgQueue.notifyAll();
           }
 
@@ -943,10 +1039,9 @@
     // Process the kill list
     
     if(regionsToKill != null) {
-      for(Iterator<HRegionInfo> i = regionsToKill.values().iterator();
-          i.hasNext(); ) {
-        
-        returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_AND_DELETE, i.next()));
+      for(HRegionInfo i: regionsToKill.values()) {
+        returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE, i));
+        killedRegions.add(i.regionName);
       }
     }
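
With MSG_REGION_CLOSE_AND_DELETE retired (see the HMsg change below), closing and
deleting becomes a two-phase protocol: the master sends a plain MSG_REGION_CLOSE
and notes the region in killedRegions (and, for deletions, regionsToDelete); only
when the server's close report comes back does PendingCloseReport mark the region
offline or delete it on disk. The bookkeeping, compressed into a sketch using the
field names from this patch:

    // Phase 1 - ask the server to close, and remember why we asked:
    returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE, info));
    killedRegions.add(info.regionName);    // do not reopen
    regionsToDelete.add(info.regionName);  // only if it should also be deleted

    // Phase 2 - when MSG_REPORT_CLOSE arrives in regionServerReport:
    boolean killed = killedRegions.remove(region.regionName);
    boolean deleteRegion = regionsToDelete.remove(region.regionName);
    boolean reassignRegion = !killed && !deleteRegion;
    msgQueue.add(new PendingCloseReport(region, reassignRegion, deleteRegion));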
 
@@ -993,48 +1088,6 @@
     notifyAll();
   }
   
-  /**
-   * Wait until <code>rootRegionLocation</code> has been set or until the
-   * <code>closed</code> flag has been set.
-   * @return True if <code>rootRegionLocation</code> was populated.
-   */
-  private synchronized boolean waitForRootRegionOrClose() {
-    while (!closed && rootRegionLocation == null) {
-      try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wait for root region (or close)");
-        }
-        wait();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wake from wait for root region (or close)");
-        }
-      } catch(InterruptedException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wake from wait for root region (or close) (IE)");
-        }
-      }
-    }
-    return this.rootRegionLocation == null;
-  }
-  
-  private synchronized void waitForRootRegion() {
-    while (rootRegionLocation == null) {
-      try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wait for root region");
-        }
-        wait();
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wake from wait for root region");
-        }
-      } catch(InterruptedException e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Wake from wait for root region (IE)");
-        }
-      }
-    }
-  }
-
   //////////////////////////////////////////////////////////////////////////////
   // Some internal classes to manage msg-passing and client operations
   //////////////////////////////////////////////////////////////////////////////
@@ -1057,6 +1110,20 @@
     private String deadServer;
     private long oldStartCode;
     
+    private class ToDoEntry {
+      boolean deleteRegion;
+      boolean regionOffline;
+      HStoreKey key;
+      HRegionInfo info;
+      
+      ToDoEntry(HStoreKey key, HRegionInfo info) {
+        this.deleteRegion = false;
+        this.regionOffline = false;
+        this.key = key;
+        this.info = info;
+      }
+    }
+    
     public PendingServerShutdown(HServerInfo serverInfo) {
       super();
       this.deadServer = serverInfo.getServerAddress().toString();
@@ -1066,7 +1133,7 @@
     private void scanMetaRegion(HRegionInterface server, long scannerId,
         Text regionName) throws IOException {
 
-      Vector<HStoreKey> toDoList = new Vector<HStoreKey>();
+      Vector<ToDoEntry> toDoList = new Vector<ToDoEntry>();
       TreeMap<Text, HRegionInfo> regions = new TreeMap<Text, HRegionInfo>();
 
       DataInputBuffer inbuf = new DataInputBuffer();
@@ -1092,7 +1159,9 @@
 
           TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
           for(int i = 0; i < values.length; i++) {
-            results.put(values[i].getLabel(), values[i].getData().get());
+            byte[] bytes = new byte[values[i].getData().getSize()];
+            System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
+            results.put(values[i].getLabel(), bytes);
           }
           
           byte[] bytes = results.get(COL_SERVER); 
@@ -1154,11 +1223,35 @@
           if(LOG.isDebugEnabled()) {
             LOG.debug(serverName + " was serving " + info.regionName);
           }
+
+          ToDoEntry todo = new ToDoEntry(key, info);
+          toDoList.add(todo);
           
-          // Add to our to do lists
+          if(killList.containsKey(deadServer)) {
+            TreeMap<Text, HRegionInfo> regionsToKill = killList.get(deadServer);
+            if(regionsToKill.containsKey(info.regionName)) {
+              regionsToKill.remove(info.regionName);
+              killList.put(deadServer, regionsToKill);
+              unassignedRegions.remove(info.regionName);
+              assignAttempts.remove(info.regionName);
+              
+              if(regionsToDelete.contains(info.regionName)) {
+                // Delete this region
+                
+                regionsToDelete.remove(info.regionName);
+                todo.deleteRegion = true;
+                
+              } else {
+                // Mark region offline
+                
+                todo.regionOffline = true;
+              }
+            }
+          } else {
+            // Get region reassigned
 
-          toDoList.add(key);
-          regions.put(info.regionName, info);
+            regions.put(info.regionName, info);
+          }
         }
 
       } finally {
@@ -1176,18 +1269,28 @@
       // Remove server from root/meta entries
 
       for(int i = 0; i < toDoList.size(); i++) {
-        long lockid = server.startUpdate(regionName, clientId, toDoList.get(i).getRow());
+        ToDoEntry e = toDoList.get(i);
+        long lockid = server.startUpdate(regionName, clientId, e.key.getRow());
+        if(e.deleteRegion) {
+          server.delete(regionName, clientId, lockid, COL_REGIONINFO);
+          
+        } else if(e.regionOffline) {
+          e.info.offLine = true;
+          ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
+          DataOutputStream s = new DataOutputStream(byteValue);
+          e.info.write(s);
+
+          server.put(regionName, clientId, lockid, COL_REGIONINFO,
+              new BytesWritable(byteValue.toByteArray()));
+        }
         server.delete(regionName, clientId, lockid, COL_SERVER);
         server.delete(regionName, clientId, lockid, COL_STARTCODE);
         server.commit(regionName, clientId, lockid);
       }
 
-      // Put all the regions we found on the unassigned region list
-
-      for(Iterator<Map.Entry<Text, HRegionInfo>> i = regions.entrySet().iterator();
-          i.hasNext(); ) {
+      // Get regions reassigned
 
-        Map.Entry<Text, HRegionInfo> e = i.next();
+      for(Map.Entry<Text, HRegionInfo> e: regions.entrySet()) {
         Text region = e.getKey();
         HRegionInfo regionInfo = e.getValue();
 
@@ -1257,13 +1360,17 @@
   private class PendingCloseReport extends PendingOperation {
     private HRegionInfo regionInfo;
     private boolean reassignRegion;
+    private boolean deleteRegion;
     private boolean rootRegion;
     
-    public PendingCloseReport(HRegionInfo regionInfo, boolean reassignRegion) {
+    public PendingCloseReport(HRegionInfo regionInfo, boolean reassignRegion,
+        boolean deleteRegion) {
+      
       super();
 
       this.regionInfo = regionInfo;
       this.reassignRegion = reassignRegion;
+      this.deleteRegion = deleteRegion;
 
       // If the region closing down is a meta region then we need to update
       // the ROOT table
@@ -1312,6 +1419,18 @@
 
         try {
           long lockid = server.startUpdate(metaRegionName, clientId, regionInfo.regionName);
+          if(deleteRegion) {
+            server.delete(metaRegionName, clientId, lockid, COL_REGIONINFO);
+            
+          } else if(!reassignRegion) {
+            regionInfo.offLine = true;
+            ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
+            DataOutputStream s = new DataOutputStream(byteValue);
+            regionInfo.write(s);
+
+            server.put(metaRegionName, clientId, lockid, COL_REGIONINFO,
+                new BytesWritable(byteValue.toByteArray()));
+          }
           server.delete(metaRegionName, clientId, lockid, COL_SERVER);
           server.delete(metaRegionName, clientId, lockid, COL_STARTCODE);
           server.commit(metaRegionName, clientId, lockid);
@@ -1332,6 +1451,16 @@
         
         unassignedRegions.put(regionInfo.regionName, regionInfo);
         assignAttempts.put(regionInfo.regionName, 0L);
+        
+      } else if(deleteRegion) {
+        try {
+          HRegion.deleteRegion(fs, dir, regionInfo.regionName);
+
+        } catch(IOException e) {
+          LOG.error("failed to delete region " + regionInfo.regionName);
+          LOG.error(e);
+          throw e;
+        }
       }
     }
   }
@@ -1425,18 +1554,35 @@
     }
   }
 
+  private synchronized void waitForRootRegion() {
+    while (rootRegionLocation == null) {
+      try {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wait for root region");
+        }
+        wait();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wake from wait for root region");
+        }
+      } catch(InterruptedException e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Wake from wait for root region (IE)");
+        }
+      }
+    }
+  }
+
   //////////////////////////////////////////////////////////////////////////////
   // HMasterInterface
   //////////////////////////////////////////////////////////////////////////////
   
-  /** returns the HMaster server address */
-  public HServerAddress getMasterAddress() {
-    return address;
+  public boolean isMasterRunning() {
+    return !closed;
   }
 
   public void createTable(HTableDescriptor desc) throws IOException {
     if (!isMasterRunning()) {
-      throw new IllegalStateException(MASTER_NOT_RUNNING);
+      throw new MasterNotRunningException();
     }
     HRegionInfo newRegion = new HRegionInfo(rand.nextLong(), desc, null, null);
 
@@ -1560,211 +1706,533 @@
     meta.commit(writeid);
   }
   
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#deleteTable(org.apache.hadoop.io.Text)
+   */
   public void deleteTable(Text tableName) throws IOException {
-    if (!isMasterRunning()) {
-      throw new IllegalStateException(MASTER_NOT_RUNNING);
+    new TableDelete(tableName).process();
+    if(LOG.isDebugEnabled()) {
+      LOG.debug("deleted table: " + tableName);
     }
+  }
+  
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#addColumn(org.apache.hadoop.io.Text, org.apache.hadoop.hbase.HColumnDescriptor)
+   */
+  public void addColumn(Text tableName, HColumnDescriptor column) throws IOException {
+    new AddColumn(tableName, column).process();
+  }
+  
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#deleteColumn(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text)
+   */
+  public void deleteColumn(Text tableName, Text columnName) throws IOException {
+    new DeleteColumn(tableName, HStoreKey.extractFamily(columnName)).process();
+  }
+  
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#mergeRegions(org.apache.hadoop.io.Text, org.apache.hadoop.io.Text)
+   */
+  public void mergeRegions(Text regionName1, Text regionName2) throws IOException {
+    //TODO
+  }
+  
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#enableTable(org.apache.hadoop.io.Text)
+   */
+  public void enableTable(Text tableName) throws IOException {
+    new ChangeTableState(tableName, true).process();
+  }
+  
+  /** 
+   * Turn off the HMaster.  Sets a flag so that the main thread knows to shut
+   * things down in an orderly fashion.
+   */
+  public void shutdown() {
+    TimerTask tt = new TimerTask() {
+      @Override
+      public void run() {
+        closed = true;
+        synchronized(msgQueue) {
+          msgQueue.clear();                         // Empty the queue
+          msgQueue.notifyAll();                     // Wake main thread
+        }
+      }
+    };
+    Timer t = new Timer("Shutdown");
+    t.schedule(tt, 10);
+  }
 
-    for(int tries = 0; tries < numRetries; tries++) {
-      try {
-        // We can not access any meta region if they have not already been
-        // assigned and scanned.
-
-        metaScanner.waitForMetaScan();
-
-        Text firstMetaRegion = null;
-        if(knownMetaRegions.size() == 1) {
-          firstMetaRegion = knownMetaRegions.firstKey();
-
-        } else if(knownMetaRegions.containsKey(tableName)) {
-          firstMetaRegion = tableName;
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#findRootRegion()
+   */
+  public HServerAddress findRootRegion() {
+    return rootRegionLocation;
+  }
 
-        } else {
-          firstMetaRegion = knownMetaRegions.headMap(tableName).lastKey();
-        }
+  /* (non-Javadoc)
+   * @see org.apache.hadoop.hbase.HMasterInterface#disableTable(org.apache.hadoop.io.Text)
+   */
+  public void disableTable(Text tableName) throws IOException {
+    new ChangeTableState(tableName, false).process();
+  }
+  
+  // Helper classes for HMasterInterface
 
-        synchronized(metaScannerLock) {     // Prevent meta scanner from running
-          for(Iterator<MetaRegion> it =
-              knownMetaRegions.tailMap(firstMetaRegion).values().iterator();
-              it.hasNext(); ) {
+  private abstract class TableOperation {
+    private SortedSet<MetaRegion> metaRegions;
+    protected Text tableName;
+    
+    protected TreeSet<HRegionInfo> unservedRegions;
+    
+    protected TableOperation(Text tableName) throws IOException {
+      if (!isMasterRunning()) {
+        throw new MasterNotRunningException();
+      }
+      this.metaRegions = new TreeSet<MetaRegion>();
+      this.tableName = tableName;
+      this.unservedRegions = new TreeSet<HRegionInfo>();
 
-            // Find all the regions that make up this table
+      // We can not access any meta region if they have not already been
+      // assigned and scanned.
 
-            MetaRegion m = it.next();
-            HRegionInterface server = client.getHRegionConnection(m.server);
+      metaScanner.waitForMetaScan();
 
-            // Rows in the meta table we will need to delete
+      Text firstMetaRegion = null;
+      if(knownMetaRegions.size() == 1) {
+        firstMetaRegion = knownMetaRegions.firstKey();
 
-            Vector<Text> rowsToDelete = new Vector<Text>();
+      } else if(knownMetaRegions.containsKey(tableName)) {
+        firstMetaRegion = tableName;
 
-            // Regions that are being served. We will get the HRegionServers
-            // to delete them for us, but we don't tell them that until after
-            // we are done scanning to prevent lock contention
+      } else {
+        firstMetaRegion = knownMetaRegions.headMap(tableName).lastKey();
+      }
 
-            TreeMap<String, TreeMap<Text, HRegionInfo>> localKillList =
-              new TreeMap<String, TreeMap<Text, HRegionInfo>>();
+      this.metaRegions.addAll(knownMetaRegions.tailMap(firstMetaRegion).values());
+    }
+    
+    public void process() throws IOException {
+      for(int tries = 0; tries < numRetries; tries++) {
+        boolean tableExists = false;
+        try {
+          synchronized(metaScannerLock) {     // Prevent meta scanner from running
+            for(MetaRegion m: metaRegions) {
 
-            // Regions that are not being served. We will have to delete
-            // them ourselves
+              // Get a connection to a meta server
 
-            TreeSet<Text> unservedRegions = new TreeSet<Text>();
+              HRegionInterface server = client.getHRegionConnection(m.server);
 
-            long scannerId = -1L;
-            try {
-              scannerId = server.openScanner(m.regionName, METACOLUMNS, tableName);
+              // Open a scanner on the meta region
+              
+              long scannerId =
+                server.openScanner(m.regionName, METACOLUMNS, tableName);
+              
+              try {
+                DataInputBuffer inbuf = new DataInputBuffer();
+                byte[] bytes;
+                while(true) {
+                  HRegionInfo info = new HRegionInfo();
+                  String serverName = null;
+                  long startCode = -1L;
+                  
+                  LabelledData[] values = null;
+                  HStoreKey key = new HStoreKey();
+                  values = server.next(scannerId, key);
+                  if(values == null || values.length == 0) {
+                    break;
+                  }
+                  boolean haveRegionInfo = false;
+                  for(int i = 0; i < values.length; i++) {
+                    bytes = new byte[values[i].getData().getSize()];
+                    if(bytes.length == 0) {
+                      break;
+                    }
+                    System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
+                   
+                    if(values[i].getLabel().equals(COL_REGIONINFO)) {
+                      haveRegionInfo = true;
+                      inbuf.reset(bytes, bytes.length);
+                      info.readFields(inbuf);
+                      
+                    } else if(values[i].getLabel().equals(COL_SERVER)) {
+                      try {
+                        serverName = new String(bytes, UTF8_ENCODING);
+                        
+                      } catch(UnsupportedEncodingException e) {
+                        assert(false);
+                      }
+                      
+                    } else if(values[i].getLabel().equals(COL_STARTCODE)) {
+                      try {
+                        startCode = Long.valueOf(new String(bytes, UTF8_ENCODING));
+                        
+                      } catch(UnsupportedEncodingException e) {
+                        assert(false);
+                      }
+                    }
+                  }
+                  
+                  if(!haveRegionInfo) {
+                    throw new IOException(COL_REGIONINFO + " not found");
+                  }
+                  
+                  if(info.tableDesc.getName().compareTo(tableName) > 0) {
+                    break;               // Beyond any more entries for this table
+                  }
+                  
+                  tableExists = true;
+                  if(!isBeingServed(serverName, startCode)) {
+                    unservedRegions.add(info);
+                  }
+                  processScanItem(serverName, startCode, info);
 
-              DataInputBuffer inbuf = new DataInputBuffer();
-              byte[] bytes;
-              while(true) {
-                LabelledData[] values = null;
-                HStoreKey key = new HStoreKey();
-                values = server.next(scannerId, key);
-                if(values == null || values.length == 0) {
-                  break;
-                }
-                TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-                for(int i = 0; i < values.length; i++) {
-                  bytes = new byte[values[i].getData().getSize()];
-                  System.arraycopy(values[i].getData().get(), 0, bytes, 0, bytes.length);
-                  results.put(values[i].getLabel(), bytes);
-                }
-                bytes = results.get(COL_REGIONINFO);
-                if(bytes == null || bytes.length == 0) {
-                  break;
-                }
-                inbuf.reset(bytes, bytes.length);
-                HRegionInfo info = new HRegionInfo();
-                info.readFields(inbuf);
+                } // while(true)
+                
+              } finally {
+                if(scannerId != -1L) {
+                  try {
+                    server.close(scannerId);
 
-                if(info.tableDesc.getName().compareTo(tableName) > 0) {
-                  break;               // Beyond any more entries for this table
+                  } catch(IOException e) {
+                    LOG.error(e);
+                  }
                 }
+                scannerId = -1L;
+              }
+              
+              if(!tableExists) {
+                throw new IOException(tableName + " does not exist");
+              }
+              
+              postProcessMeta(m, server);
+              unservedRegions.clear();
+              
+            } // for(MetaRegion m:)
+          } // synchronized(metaScannerLock)
+          
+        } catch(NotServingRegionException e) {
+          if(tries == numRetries - 1) {
+            throw e;
+          }
+          continue;
+        }
+        break;
+      } // for(tries...)
+    }
+    
+    protected boolean isBeingServed(String serverName, long startCode) {
+      boolean result = false;
+      
+      if(serverName != null && startCode != -1L) {
+        HServerInfo s = serversToServerInfo.get(serverName);
+        result = s != null && s.getStartCode() == startCode;
+      }
+      return result;
+    }
+    
+    protected boolean isEnabled(HRegionInfo info) {
+      return !info.offLine;
+    }
+    
+    protected abstract void processScanItem(String serverName, long startCode,
+        HRegionInfo info) throws IOException;
+    
+    protected abstract void postProcessMeta(MetaRegion m, 
+        HRegionInterface server) throws IOException;
+  }
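
TableOperation is a template method: process() owns the retry loop, the meta
scan, and the locking, while each subclass supplies only processScanItem() (per
row) and postProcessMeta() (per meta region). A hypothetical read-only subclass,
not part of this commit, shows the intended shape; the real subclasses follow:

    private class ListRegions extends TableOperation {
      ListRegions(Text tableName) throws IOException {
        super(tableName);
      }

      protected void processScanItem(String serverName, long startCode,
          HRegionInfo info) {
        LOG.info(info.regionName + " served by " + serverName);
      }

      protected void postProcessMeta(MetaRegion m, HRegionInterface server) {
        // read-only: nothing to write back to the meta region
      }
    }

Driving it mirrors the public methods above: new ListRegions(tableName).process();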
+  
+  private class ChangeTableState extends TableOperation {
+    private boolean online;
+    
+    protected TreeMap<String, TreeSet<HRegionInfo>> servedRegions;
+    protected long lockid;
+    protected long clientId;
+    
+    public ChangeTableState(Text tableName, boolean onLine) throws IOException {
+      super(tableName);
+      this.online = onLine;
+      this.servedRegions = new TreeMap<String, TreeSet<HRegionInfo>>();
+    }
+    
+    protected void processScanItem(String serverName, long startCode,
+        HRegionInfo info) throws IOException {
+      
+      if(isBeingServed(serverName, startCode)) {
+        TreeSet<HRegionInfo> regions = servedRegions.get(serverName);
+        if(regions == null) {
+          regions = new TreeSet<HRegionInfo>();
+        }
+        regions.add(info);
+        servedRegions.put(serverName, regions);
+      }
+    }
+    
+    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
+        throws IOException {
 
-                rowsToDelete.add(info.regionName);
-
-                // Is it being served?
+      // Process regions not being served
+      
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("processing unserved regions");
+      }
+      for(HRegionInfo i: unservedRegions) {
+        
+        // Update meta table
 
-                bytes = results.get(COL_SERVER);
-                if(bytes != null && bytes.length != 0) {
-                  String serverName = new String(bytes, UTF8_ENCODING);
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("updating columns in row: " + i.regionName);
+        }
 
-                  bytes = results.get(COL_STARTCODE);
-                  if(bytes != null && bytes.length != 0) {
-                    long startCode = Long.valueOf(new String(bytes, UTF8_ENCODING));
+        lockid = -1L;
+        clientId = rand.nextLong();
+        try {
+          lockid = server.startUpdate(m.regionName, clientId, i.regionName);
+          updateRegionInfo(server, m.regionName, i);
+          server.delete(m.regionName, clientId, lockid, COL_SERVER);
+          server.delete(m.regionName, clientId, lockid, COL_STARTCODE);
+          server.commit(m.regionName, clientId, lockid);
+          lockid = -1L;
 
-                    HServerInfo s = serversToServerInfo.get(serverName);
-                    if(s != null && s.getStartCode() == startCode) {
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("updated columns in row: " + i.regionName);
+          }
 
-                      // It is being served.
-                      // Tell the server to stop it and not report back.
+        } catch(NotServingRegionException e) {
+          throw e;
 
-                      TreeMap<Text, HRegionInfo> regionsToKill =
-                        localKillList.get(serverName);
+        } catch(IOException e) {
+          LOG.error("column update failed in row: " + i.regionName);
+          LOG.error(e);
 
-                      if(regionsToKill == null) {
-                        regionsToKill = new TreeMap<Text, HRegionInfo>();
-                      }
-                      regionsToKill.put(info.regionName, info);
-                      localKillList.put(serverName, regionsToKill);
-                      continue;
-                    }
-                  }
-                }
+        } finally {
+          try {
+            if(lockid != -1L) {
+              server.abort(m.regionName, clientId, lockid);
+            }
 
-                // Region is not currently being served.
-                // Prevent it from getting assigned and add it to the list of
-                // regions we need to delete here.
-
-                unassignedRegions.remove(info.regionName);
-                assignAttempts.remove(info.regionName);
-                unservedRegions.add(info.regionName);
-              }
+          } catch(IOException iex) {
+            LOG.error(iex);
+          }
+        }
 
-            } finally {
-              if(scannerId != -1L) {
-                try {
-                  server.close(scannerId);
+        if(online) {                            // Bring offline regions on-line
+          if(!unassignedRegions.containsKey(i.regionName)) {
+            unassignedRegions.put(i.regionName, i);
+            assignAttempts.put(i.regionName, 0L);
+          }
+          
+        } else {                                // Prevent region from getting assigned.
+          unassignedRegions.remove(i.regionName);
+          assignAttempts.remove(i.regionName);
+        }
+      }
+      
+      // Process regions currently being served
+      
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("processing regions currently being served");
+      }
+      for(Map.Entry<String, TreeSet<HRegionInfo>> e: servedRegions.entrySet()) {
+        String serverName = e.getKey();
+        
+        if(online) {
+          continue;                             // Already being served
+        }
+        
+        // Cause regions being served to be taken off-line and disabled
+        
+        TreeMap<Text, HRegionInfo> localKillList = killList.get(serverName);
+        if(localKillList == null) {
+          localKillList = new TreeMap<Text, HRegionInfo>();
+        }
+        for(HRegionInfo i: e.getValue()) {
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("adding region " + i.regionName + " to local kill list");
+          }
+          localKillList.put(i.regionName, i);
+        }
+        if(localKillList != null && localKillList.size() > 0) {
+          if(LOG.isDebugEnabled()) {
+            LOG.debug("inserted local kill list into kill list for server " + serverName);
+          }
+          killList.put(serverName, localKillList);
+        }
+      }
+      servedRegions.clear();
+    }
+    
+    protected void updateRegionInfo(HRegionInterface server, Text regionName,
+        HRegionInfo i) throws IOException {
+      
+      i.offLine = !online;
+      
+      ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
+      DataOutputStream s = new DataOutputStream(byteValue);
+      i.write(s);
 
-                } catch(IOException e) {
-                  LOG.error(e);
-                }
-              }
-              scannerId = -1L;
-            }
+      server.put(regionName, clientId, lockid, COL_REGIONINFO,
+          new BytesWritable(byteValue.toByteArray()));
+      
+    }
+  }
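
The serialize-HRegionInfo-into-a-BytesWritable sequence in updateRegionInfo()
recurs several times in this patch (PendingServerShutdown, PendingCloseReport,
ColumnOperation). A hypothetical helper capturing it:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.BytesWritable;

    static BytesWritable toBytesWritable(HRegionInfo info) throws IOException {
      ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
      info.write(new DataOutputStream(byteValue));
      return new BytesWritable(byteValue.toByteArray());
    }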
+  
+  private class TableDelete extends ChangeTableState {
+    
+    public TableDelete(Text tableName) throws IOException {
+      super(tableName, false);
+    }
+    
+    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
+        throws IOException {
 
-            // Wipe the existence of the regions out of the meta table
+      // For regions that are being served, mark them for deletion
+      
+      for(TreeSet<HRegionInfo> s: servedRegions.values()) {
+        for(HRegionInfo i: s) {
+          regionsToDelete.add(i.regionName);
+        }
+      }
 
-            for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext(); ) {
-              Text rowName = row.next();
-              if(LOG.isDebugEnabled()) {
-                LOG.debug("deleting columns in row: " + rowName);
-              }
-              long lockid = -1L;
-              long clientId = rand.nextLong();
-              try {
-                lockid = server.startUpdate(m.regionName, clientId, rowName);
-                server.delete(m.regionName, clientId, lockid, COL_REGIONINFO);
-                server.delete(m.regionName, clientId, lockid, COL_SERVER);
-                server.delete(m.regionName, clientId, lockid, COL_STARTCODE);
-                server.commit(m.regionName, clientId, lockid);
-                lockid = -1L;
-                if(LOG.isDebugEnabled()) {
-                  LOG.debug("deleted columns in row: " + rowName);
-                }
+      // Unserved regions we can delete now
+      
+      for(HRegionInfo i: unservedRegions) {
+        // Delete the region
 
-              } catch(IOException e) {
-                if(lockid != -1L) {
-                  server.abort(m.regionName, clientId, lockid);
-                }
-                LOG.error("columns deletion failed in row: " + rowName);
-                LOG.error(e);
-                throw e;
-              }
-            }
+        try {
+          HRegion.deleteRegion(fs, dir, i.regionName);
 
-            // Notify region servers that some regions need to be closed and deleted
+        } catch(IOException e) {
+          LOG.error("failed to delete region " + i.regionName);
+          LOG.error(e);
+        }
+      }
+      super.postProcessMeta(m, server);
+    }
+    
+    @Override
+    protected void updateRegionInfo(HRegionInterface server, Text regionName,
+        HRegionInfo i) throws IOException {
+      
+      server.delete(regionName, clientId, lockid, COL_REGIONINFO);
+    }
+  }
+  
+  private abstract class ColumnOperation extends TableOperation {
+    
+    protected ColumnOperation(Text tableName) throws IOException {
+      super(tableName);
+    }
 
-            if(localKillList.size() != 0) {
-              killList.putAll(localKillList);
-            }
+    protected void processScanItem(String serverName, long startCode,
+        HRegionInfo info) throws IOException {
+      
+      if(isEnabled(info)) {
+        throw new TableNotDisabledException(tableName.toString());
+      }
+    }
 
-            // Delete any regions that are not being served
+    protected void updateRegionInfo(HRegionInterface server, Text regionName,
+        HRegionInfo i) throws IOException {
+      
+      ByteArrayOutputStream byteValue = new ByteArrayOutputStream();
+      DataOutputStream s = new DataOutputStream(byteValue);
+      i.write(s);
 
-            for(Iterator<Text> i = unservedRegions.iterator(); i.hasNext(); ) {
-              Text regionName = i.next();
-              try {
-                HRegion.deleteRegion(fs, dir, regionName);
+      long lockid = -1L;
+      long clientId = rand.nextLong();
+      try {
+        lockid = server.startUpdate(regionName, clientId, i.regionName);
+        server.put(regionName, clientId, lockid, COL_REGIONINFO,
+            new BytesWritable(byteValue.toByteArray()));
+      
+        server.commit(regionName, clientId, lockid);
+        lockid = -1L;
 
-              } catch(IOException e) {
-                LOG.error("failed to delete region " + regionName);
-                LOG.error(e);
-                throw e;
-              }
-            }
-          }
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("updated columns in row: " + i.regionName);
         }
+
       } catch(NotServingRegionException e) {
-        if(tries == numRetries - 1) {
-          throw e;
-        }
-        continue;
+        throw e;
 
       } catch(IOException e) {
+        LOG.error("column update failed in row: " + i.regionName);
         LOG.error(e);
-        throw e;
+
+      } finally {
+        if(lockid != -1L) {
+          try {
+            server.abort(regionName, clientId, lockid);
+            
+          } catch(IOException iex) {
+            LOG.error(iex);
+          }
+        }
       }
-      break;
     }
+  }
+  
+  private class DeleteColumn extends ColumnOperation {
+    private Text columnName;
+    
+    public DeleteColumn(Text tableName, Text columnName) throws IOException {
+      super(tableName);
+      this.columnName = columnName;
+    }
+    
+    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
+    throws IOException {
 
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("deleted table: " + tableName);
+      for(HRegionInfo i: unservedRegions) {
+        i.tableDesc.families().remove(columnName);
+        updateRegionInfo(server, m.regionName, i);
+        
+        // Delete the directories used by the column
+
+        try {
+          fs.delete(HStoreFile.getMapDir(dir, i.regionName, columnName));
+          
+        } catch(IOException e) {
+          LOG.error(e);
+        }
+        
+        try {
+          fs.delete(HStoreFile.getInfoDir(dir, i.regionName, columnName));
+          
+        } catch(IOException e) {
+          LOG.error(e);
+        }
+        
+      }
     }
   }
   
-  public HServerAddress findRootRegion() {
-    return rootRegionLocation;
-  }
+  private class AddColumn extends ColumnOperation {
+    private HColumnDescriptor newColumn;
+    
+    public AddColumn(Text tableName, HColumnDescriptor newColumn)
+        throws IOException {
+      
+      super(tableName);
+      this.newColumn = newColumn;
+    }
+   
+    protected void postProcessMeta(MetaRegion m, HRegionInterface server)
+        throws IOException {
 
+      for(HRegionInfo i: unservedRegions) {
+        
+        //TODO: I *think* all we need to do to add a column is add it to
+        // the table descriptor. When the region is brought on-line, it
+        // should find the column missing and create it.
+        
+        i.tableDesc.addFamily(newColumn);
+        updateRegionInfo(server, m.regionName, i);
+      }
+    }
+  }
+  
   //////////////////////////////////////////////////////////////////////////////
   // Managing leases
   //////////////////////////////////////////////////////////////////////////////
@@ -1789,6 +2257,10 @@
     }
   }
 
+  //////////////////////////////////////////////////////////////////////////////
+  // Main program
+  //////////////////////////////////////////////////////////////////////////////
+  
   private static void printUsageAndExit() {
     System.err.println("Usage: java org.apache.hbase.HMaster " +
         "[--bind=hostname:port] start|stop");

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMasterInterface.java Tue May 29 10:17:44 2007
@@ -41,6 +41,14 @@
   public void createTable(HTableDescriptor desc) throws IOException;
   public void deleteTable(Text tableName) throws IOException;
   
+  public void addColumn(Text tableName, HColumnDescriptor column) throws IOException;
+  public void deleteColumn(Text tableName, Text columnName) throws IOException;
+  
+  public void mergeRegions(Text regionName1, Text regionName2) throws IOException;
+  
+  public void enableTable(Text tableName) throws IOException;
+  public void disableTable(Text tableName) throws IOException;
+  
   /**
    * Shutdown an HBase cluster.
    */
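
Together with the master-side helpers above, the new calls imply an ordering:
ColumnOperation throws TableNotDisabledException if any region of the table is
still enabled, so schema changes must be bracketed by disable/enable. A
hypothetical client sequence ("anchor:" and "oldfamily:" are illustrative
column family names, not from this commit):

    void alterTable(HMasterInterface master, Text tableName) throws IOException {
      master.disableTable(tableName);
      master.addColumn(tableName, new HColumnDescriptor("anchor:"));
      master.deleteColumn(tableName, new Text("oldfamily:"));
      master.enableTable(tableName);
    }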

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java Tue May 29 10:17:44 2007
@@ -30,12 +30,12 @@
   public static final byte MSG_CALL_SERVER_STARTUP = 4;
   public static final byte MSG_REGIONSERVER_STOP = 5;
   public static final byte MSG_REGION_CLOSE_WITHOUT_REPORT = 6;
-  public static final byte MSG_REGION_CLOSE_AND_DELETE = 7;
 
   public static final byte MSG_REPORT_OPEN = 100;
   public static final byte MSG_REPORT_CLOSE = 101;
   public static final byte MSG_REGION_SPLIT = 102;
   public static final byte MSG_NEW_REGION = 103;
+  public static final byte MSG_REPORT_EXITING = 104;
 
   byte msg;
   HRegionInfo info;

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java Tue May 29 10:17:44 2007
@@ -333,12 +333,12 @@
     }
 
     // Load in all the HStores.
-    for(Iterator<Text> it = this.regionInfo.tableDesc.families().iterator();
-        it.hasNext(); ) { 
-      Text colFamily = HStoreKey.extractFamily(it.next());
+    for(Map.Entry<Text, HColumnDescriptor> e :
+        this.regionInfo.tableDesc.families().entrySet()) {
+      
+      Text colFamily = HStoreKey.extractFamily(e.getKey());
       stores.put(colFamily, new HStore(dir, this.regionInfo.regionName,
-          colFamily, this.regionInfo.tableDesc.getMaxVersions(), fs,
-          oldLogFile, conf));
+          e.getValue(), fs, oldLogFile, conf));
     }
 
     // Get rid of any splits or merges that were lost in-progress
@@ -376,14 +376,6 @@
       closed = writestate.closed;
     }
     return closed;
-  }
-  
-  /** Closes and deletes this HRegion. Called when doing a table deletion, for example */
-  public void closeAndDelete() throws IOException {
-    LOG.info("deleting region: " + regionInfo.regionName);
-    close();
-    deleteRegion(fs, dir, regionInfo.regionName);
-    LOG.info("region deleted: " + regionInfo.regionName);
   }
   
   /**

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java Tue May 29 10:17:44 2007
@@ -35,6 +35,7 @@
   public Text startKey;
   public Text endKey;
   public Text regionName;
+  public boolean offLine;
   
   public HRegionInfo() {
     this.regionId = 0;
@@ -42,16 +43,12 @@
     this.startKey = new Text();
     this.endKey = new Text();
     this.regionName = new Text();
+    this.offLine = false;
   }
   
-  public HRegionInfo(final byte [] serializedBytes) {
+  public HRegionInfo(final byte [] serializedBytes) throws IOException {
     this();
-    try {
-      readFields(new DataInputStream(
-        new ByteArrayInputStream(serializedBytes)));
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
+    readFields(new DataInputStream(new ByteArrayInputStream(serializedBytes)));
   }
 
   public HRegionInfo(long regionId, HTableDescriptor tableDesc,
@@ -79,6 +76,8 @@
     this.regionName = new Text(tableDesc.getName() + "_" +
       (startKey == null ? "" : startKey.toString()) + "_" +
       regionId);
+    
+    this.offLine = false;
   }
   
   @Override
@@ -98,6 +97,7 @@
     startKey.write(out);
     endKey.write(out);
     regionName.write(out);
+    out.writeBoolean(offLine);
   }
   
   public void readFields(DataInput in) throws IOException {
@@ -106,5 +106,6 @@
     this.startKey.readFields(in);
     this.endKey.readFields(in);
     this.regionName.readFields(in);
+    this.offLine = in.readBoolean();
   }
 }
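
The new offLine flag is appended last in both write() and readFields(), keeping
the Writable symmetric, and the byte[] constructor now surfaces IOException to
the caller instead of wrapping it in a RuntimeException. A round-trip sketch:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // offLine survives the trip because both paths handle fields in the
    // same order.
    static HRegionInfo roundTrip(HRegionInfo info) throws IOException {
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      info.write(new DataOutputStream(bytes));
      return new HRegionInfo(bytes.toByteArray());
    }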

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java Tue May 29 10:17:44 2007
@@ -603,6 +603,13 @@
       }
     }
     try {
+      HMsg[] exitMsg = { new HMsg(HMsg.MSG_REPORT_EXITING) };
+      hbaseMaster.regionServerReport(info, exitMsg);
+      
+    } catch(IOException e) {
+      LOG.warn(e);
+    }
+    try {
       LOG.info("stopping server at: " + info.getServerAddress().toString());
 
       // Send interrupts to wake up threads if sleeping so they notice shutdown.
@@ -747,13 +754,6 @@
             closeRegion(msg.getRegionInfo(), false);
             break;
 
-          case HMsg.MSG_REGION_CLOSE_AND_DELETE:
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("MSG_REGION_CLOSE_AND_DELETE");
-            }
-            closeAndDeleteRegion(msg.getRegionInfo());
-            break;
-
           default:
             throw new IOException("Impossible state during msg processing.  Instruction: " + msg);
           }
@@ -795,27 +795,6 @@
 
       if(reportWhenCompleted) {
         reportClose(region);
-      }
-    }
-  }
-
-  private void closeAndDeleteRegion(HRegionInfo info) throws IOException {
-    this.lock.writeLock().lock();
-    HRegion region = null;
-    try {
-      region = regions.remove(info.regionName);
-    } finally {
-      this.lock.writeLock().unlock();
-    }
-    if(region != null) {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("deleting region " + info.regionName);
-      }
-      
-      region.closeAndDelete();
-      
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("region " + info.regionName + " deleted");
       }
     }
   }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegiondirReader.java Tue May 29 10:17:44 2007
@@ -101,10 +101,10 @@
   private HTableDescriptor getTableDescriptor(final FileSystem fs,
       final Path d, final String tableName)
   throws IOException {
-    HTableDescriptor desc = new HTableDescriptor(tableName, 1);
+    HTableDescriptor desc = new HTableDescriptor(tableName);
     Text [] families = getFamilies(fs, d);
     for (Text f: families) {
-      desc.addFamily(f);
+      desc.addFamily(new HColumnDescriptor(f.toString()));
     }
     return desc;
   }
@@ -163,7 +163,7 @@
   private void dump(final HRegionInfo info) throws IOException {
     HRegion r = new HRegion(this.parentdir, null,
         FileSystem.get(this.conf), conf, info, null, null);
-    Text [] families = info.tableDesc.families().toArray(new Text [] {});
+    Text [] families = info.tableDesc.families().keySet().toArray(new Text [] {});
     HInternalScannerInterface scanner = r.getScanner(families, new Text());
     HStoreKey key = new HStoreKey();
     TreeMap<Text, BytesWritable> results = new TreeMap<Text, BytesWritable>();
@@ -183,8 +183,8 @@
     // followed by cell content.
     while(scanner.next(key, results)) {
       for (Map.Entry<Text, BytesWritable> es: results.entrySet()) {
-    	Text colname = es.getKey();
-    	BytesWritable colvalue = es.getValue();
+      Text colname = es.getKey();
+      BytesWritable colvalue = es.getValue();
         Object value = null;
         byte[] bytes = new byte[colvalue.getSize()];
         if (colname.toString().equals("info:regioninfo")) {
@@ -219,4 +219,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java Tue May 29 10:17:44 2007
@@ -52,8 +52,9 @@
 
   Path dir;
   Text regionName;
-  Text colFamily;
-  int maxVersions;
+  HColumnDescriptor family;
+  Text familyName;
+  SequenceFile.CompressionType compression;
   FileSystem fs;
   Configuration conf;
   Path mapdir;
@@ -98,23 +99,37 @@
    * <p>It's assumed that after this constructor returns, the reconstructionLog
    * file will be deleted (by whoever has instantiated the HStore).
    */
-  public HStore(Path dir, Text regionName, Text colFamily, int maxVersions, 
+  public HStore(Path dir, Text regionName, HColumnDescriptor family, 
       FileSystem fs, Path reconstructionLog, Configuration conf)
   throws IOException {  
     this.dir = dir;
     this.regionName = regionName;
-    this.colFamily = colFamily;
-    this.maxVersions = maxVersions;
+    this.family = family;
+    this.familyName = HStoreKey.extractFamily(this.family.getName());
+    this.compression = SequenceFile.CompressionType.NONE;
+    
+    if(family.getCompression() != HColumnDescriptor.CompressionType.NONE) {
+      if(family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) {
+        this.compression = SequenceFile.CompressionType.BLOCK;
+        
+      } else if(family.getCompression() == HColumnDescriptor.CompressionType.RECORD) {
+        this.compression = SequenceFile.CompressionType.RECORD;
+        
+      } else {
+        assert(false);
+      }
+    }
+    
     this.fs = fs;
     this.conf = conf;
 
-    this.mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);
+    this.mapdir = HStoreFile.getMapDir(dir, regionName, familyName);
     fs.mkdirs(mapdir);
-    this.loginfodir = HStoreFile.getInfoDir(dir, regionName, colFamily);
+    this.loginfodir = HStoreFile.getInfoDir(dir, regionName, familyName);
     fs.mkdirs(loginfodir);
 
     if(LOG.isDebugEnabled()) {
-      LOG.debug("starting HStore for " + regionName + "/"+ colFamily);
+      LOG.debug("starting HStore for " + regionName + "/"+ familyName);
     }
     
     // Either restart or get rid of any leftover compaction work.  Either way, 
@@ -123,7 +138,7 @@
 
     this.compactdir = new Path(dir, COMPACTION_DIR);
     Path curCompactStore =
-      HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+      HStoreFile.getHStoreDir(compactdir, regionName, familyName);
     if(fs.exists(curCompactStore)) {
       processReadyCompaction();
       fs.delete(curCompactStore);
@@ -134,7 +149,7 @@
     // corresponding one in 'loginfodir'. Without a corresponding log info
     // file, the entry in 'mapdir' must be deleted.
     Vector<HStoreFile> hstoreFiles 
-      = HStoreFile.loadHStoreFiles(conf, dir, regionName, colFamily, fs);
+      = HStoreFile.loadHStoreFiles(conf, dir, regionName, familyName, fs);
     for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
       HStoreFile hsf = it.next();
       mapFiles.put(hsf.loadInfo(fs), hsf);
@@ -187,7 +202,7 @@
           Text column = val.getColumn();
           if (!key.getRegionName().equals(this.regionName) ||
               column.equals(HLog.METACOLUMN) ||
-              HStoreKey.extractFamily(column).equals(this.colFamily)) {
+              HStoreKey.extractFamily(column).equals(this.familyName)) {
             if (LOG.isDebugEnabled()) {
               LOG.debug("Passing on edit " + key.getRegionName() + ", "
                   + key.getRegionName() + ", " + column.toString() + ": "
@@ -230,12 +245,12 @@
         new MapFile.Reader(fs, e.getValue().getMapFilePath().toString(), conf));
     }
     
-    LOG.info("HStore online for " + this.regionName + "/" + this.colFamily);
+    LOG.info("HStore online for " + this.regionName + "/" + this.familyName);
   }
 
   /** Turn off all the MapFile readers */
   public void close() throws IOException {
-    LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily);
+    LOG.info("closing HStore for " + this.regionName + "/" + this.familyName);
     this.lock.obtainWriteLock();
     try {
       for (MapFile.Reader map: maps.values()) {
@@ -244,7 +259,7 @@
       maps.clear();
       mapFiles.clear();
       
-      LOG.info("HStore closed for " + this.regionName + "/" + this.colFamily);
+      LOG.info("HStore closed for " + this.regionName + "/" + this.familyName);
     } finally {
       this.lock.releaseWriteLock();
     }
@@ -276,13 +291,13 @@
     
     synchronized(flushLock) {
       if(LOG.isDebugEnabled()) {
-        LOG.debug("flushing HStore " + this.regionName + "/" + this.colFamily);
+        LOG.debug("flushing HStore " + this.regionName + "/" + this.familyName);
       }
       
       // A. Write the TreeMap out to the disk
 
       HStoreFile flushedFile 
-        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, colFamily, fs);
+        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs);
       
       Path mapfile = flushedFile.getMapFilePath();
       if(LOG.isDebugEnabled()) {
@@ -290,17 +305,17 @@
       }
       
       MapFile.Writer out = new MapFile.Writer(conf, fs, mapfile.toString(), 
-          HStoreKey.class, BytesWritable.class);
+          HStoreKey.class, BytesWritable.class, compression);
       
       try {
         for (Map.Entry<HStoreKey, BytesWritable> es: inputCache.entrySet()) {
           HStoreKey curkey = es.getKey();
-          if (this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
+          if (this.familyName.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
             out.append(curkey, es.getValue());
           }
         }
         if(LOG.isDebugEnabled()) {
-          LOG.debug("HStore " + this.regionName + "/" + this.colFamily + " flushed");
+          LOG.debug("HStore " + this.regionName + "/" + this.familyName + " flushed");
         }
         
       } finally {
@@ -325,7 +340,7 @@
           mapFiles.put(logCacheFlushId, flushedFile);
           if(LOG.isDebugEnabled()) {
             LOG.debug("HStore available for " + this.regionName + "/"
-                + this.colFamily + " flush id=" + logCacheFlushId);
+                + this.familyName + " flush id=" + logCacheFlushId);
           }
         
         } finally {
@@ -373,10 +388,10 @@
   void compactHelper(boolean deleteSequenceInfo) throws IOException {
     synchronized(compactLock) {
       if(LOG.isDebugEnabled()) {
-        LOG.debug("started compaction of " + this.regionName + "/" + this.colFamily);
+        LOG.debug("started compaction of " + this.regionName + "/" + this.familyName);
       }
       
-      Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+      Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, familyName);
       fs.mkdirs(curCompactStore);
       
       try {
@@ -409,11 +424,11 @@
         }
         
         HStoreFile compactedOutputFile 
-          = new HStoreFile(conf, compactdir, regionName, colFamily, -1);
+          = new HStoreFile(conf, compactdir, regionName, familyName, -1);
         
         if(toCompactFiles.size() == 1) {
           if(LOG.isDebugEnabled()) {
-            LOG.debug("nothing to compact for " + this.regionName + "/" + this.colFamily);
+            LOG.debug("nothing to compact for " + this.regionName + "/" + this.familyName);
           }
           
           HStoreFile hsf = toCompactFiles.elementAt(0);
@@ -426,7 +441,7 @@
 
         MapFile.Writer compactedOut = new MapFile.Writer(conf, fs, 
             compactedOutputFile.getMapFilePath().toString(), HStoreKey.class, 
-            BytesWritable.class);
+            BytesWritable.class, compression);
         
         try {
 
@@ -507,7 +522,7 @@
               timesSeen = 1;
             }
             
-            if(timesSeen <= maxVersions) {
+            if(timesSeen <= family.getMaxVersions()) {
 
               // Keep old versions until we have maxVersions worth.
               // Then just skip them.
@@ -592,7 +607,7 @@
         processReadyCompaction();
         
         if(LOG.isDebugEnabled()) {
-          LOG.debug("compaction complete for " + this.regionName + "/" + this.colFamily);
+          LOG.debug("compaction complete for " + this.regionName + "/" + this.familyName);
         }
 
       } finally {
@@ -625,7 +640,7 @@
     // 1. Acquiring the write-lock
 
 
-    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
+    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, familyName);
     this.lock.obtainWriteLock();
     try {
       Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
@@ -714,10 +729,10 @@
       }
       
       HStoreFile compactedFile 
-        = new HStoreFile(conf, compactdir, regionName, colFamily, -1);
+        = new HStoreFile(conf, compactdir, regionName, familyName, -1);
       
       HStoreFile finalCompactedFile 
-        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, colFamily, fs);
+        = HStoreFile.obtainNewHStoreFile(conf, dir, regionName, familyName, fs);
       
       fs.rename(compactedFile.getMapFilePath(), finalCompactedFile.getMapFilePath());
       

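The compression plumbing added above boils down to a small mapping from the
per-family setting to the SequenceFile compression type handed to
MapFile.Writer at flush and compaction time. A standalone sketch of that
mapping (the helper class and method names are ours, not the patch's):

import org.apache.hadoop.io.SequenceFile;

public class CompressionMapping {
  // Mirror of the new HStore constructor logic: translate the column
  // family's compression setting into the SequenceFile compression type
  // used when writing MapFiles.
  static SequenceFile.CompressionType compressionFor(HColumnDescriptor family) {
    if (family.getCompression() == HColumnDescriptor.CompressionType.BLOCK) {
      return SequenceFile.CompressionType.BLOCK;
    }
    if (family.getCompression() == HColumnDescriptor.CompressionType.RECORD) {
      return SequenceFile.CompressionType.RECORD;
    }
    return SequenceFile.CompressionType.NONE;
  }
}
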
Modified: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java Tue May 29 10:17:44 2007
@@ -19,7 +19,9 @@
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Iterator;
-import java.util.TreeSet;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.TreeMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -27,101 +29,73 @@
 import org.apache.hadoop.io.WritableComparable;
 
 /**
- * HTableDescriptor contains various facts about an HTable, like
- * column families, maximum number of column versions, etc.
+ * HTableDescriptor contains the name of an HTable, and its
+ * column families.
  */
 public class HTableDescriptor implements WritableComparable {
   Text name;
-  int maxVersions;
-  TreeSet<Text> families = new TreeSet<Text>();
+  TreeMap<Text, HColumnDescriptor> families;
   
   /**
    * Legal table names can only contain 'word characters':
    * i.e. <code>[a-zA-Z_0-9]</code>.
    * 
-   * Lets be restrictive until a reason to be otherwise.
+   * Let's be restrictive until a reason to be otherwise.
    */
   private static final Pattern LEGAL_TABLE_NAME =
     Pattern.compile("[\\w-]+");
   
-  /**
-   * Legal family names can only contain 'word characters' and
-   * end in a colon.
-   */
-  private static final Pattern LEGAL_FAMILY_NAME =
-    Pattern.compile("\\w+:");
-
   public HTableDescriptor() {
     this.name = new Text();
-    this.families.clear();
+    this.families = new TreeMap<Text, HColumnDescriptor>();
   }
 
   /**
    * Constructor.
    * @param name Table name.
-   * @param maxVersions Number of versions of a column to keep.
    * @throws IllegalArgumentException if passed a table name
    * that is made of other than 'word' characters: i.e.
   * <code>[a-zA-Z_0-9]</code>
    */
-  public HTableDescriptor(String name, int maxVersions) {
+  public HTableDescriptor(String name) {
     Matcher m = LEGAL_TABLE_NAME.matcher(name);
     if (m == null || !m.matches()) {
-      throw new IllegalArgumentException("Table names can only " +
-          "contain 'word characters': i.e. [a-zA-Z_0-9");
-    }
-    if (maxVersions <= 0) {
-      // TODO: Allow maxVersion of 0 to be the way you say
-      // "Keep all versions".  Until there is support, consider
-      // 0 -- or < 0 -- a configuration error.
-      throw new IllegalArgumentException("Maximum versions " +
-        "must be positive");
+      throw new IllegalArgumentException(
+          "Table names can only contain 'word characters': i.e. [a-zA-Z_0-9]");
     }
     this.name = new Text(name);
-    this.maxVersions = maxVersions;
+    this.families = new TreeMap<Text, HColumnDescriptor>();
   }
 
   public Text getName() {
     return name;
   }
 
-  public int getMaxVersions() {
-    return maxVersions;
-  }
-
   /**
    * Add a column family.
-   * @param family Column family name to add.  Column family names
-   * must end in a <code>:</code>
-   * @throws IllegalArgumentException if passed a table name
-   * that is made of other than 'word' characters: i.e.
-   * <code>[a-zA-Z_0-9]
+   * @param family HColumnDescriptor of the family to add.
    */
-  public void addFamily(Text family) {
-    String familyStr = family.toString();
-    Matcher m = LEGAL_FAMILY_NAME.matcher(familyStr);
-    if (m == null || !m.matches()) {
-      throw new IllegalArgumentException("Family names can " +
-          "only contain 'word characters' and must end with a " +
-          "':'");
-    }
-    families.add(family);
+  public void addFamily(HColumnDescriptor family) {
+    families.put(family.getName(), family);
   }
 
   /** Do we contain a given column? */
   public boolean hasFamily(Text family) {
-    return families.contains(family);
+    return families.containsKey(family);
   }
 
-  /** All the column families in this table. */
-  public TreeSet<Text> families() {
+  /** All the column families in this table.
+   * 
+   *  TODO: What is this used for? Seems dangerous to let people play with
+   *  our private members.
+   */
+  public TreeMap<Text, HColumnDescriptor> families() {
     return families;
   }
 
   @Override
   public String toString() {
-    return "name: " + this.name.toString() +
-      ", maxVersions: " + this.maxVersions + ", families: " + this.families;
+    return "name: " + this.name.toString() + ", families: " + this.families;
   }
   
   @Override
@@ -133,10 +107,9 @@
   public int hashCode() {
     // TODO: Cache.
     int result = this.name.hashCode();
-    result ^= Integer.valueOf(this.maxVersions).hashCode();
     if (this.families != null && this.families.size() > 0) {
-      for (Text family: this.families) {
-        result ^= family.hashCode();
+      for (Map.Entry<Text,HColumnDescriptor> e: this.families.entrySet()) {
+        result ^= e.hashCode();
       }
     }
     return result;
@@ -148,22 +121,21 @@
 
   public void write(DataOutput out) throws IOException {
     name.write(out);
-    out.writeInt(maxVersions);
     out.writeInt(families.size());
-    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+    for(Iterator<HColumnDescriptor> it = families.values().iterator();
+        it.hasNext(); ) {
       it.next().write(out);
     }
   }
 
   public void readFields(DataInput in) throws IOException {
     this.name.readFields(in);
-    this.maxVersions = in.readInt();
     int numCols = in.readInt();
     families.clear();
     for(int i = 0; i < numCols; i++) {
-      Text t = new Text();
-      t.readFields(in);
-      families.add(t);
+      HColumnDescriptor c = new HColumnDescriptor();
+      c.readFields(in);
+      families.put(c.getName(), c);
     }
   }
 
@@ -172,24 +144,19 @@
   //////////////////////////////////////////////////////////////////////////////
 
   public int compareTo(Object o) {
-    HTableDescriptor htd = (HTableDescriptor) o;
-    int result = name.compareTo(htd.name);
+    HTableDescriptor other = (HTableDescriptor) o;
+    int result = name.compareTo(other.name);
+    
     if(result == 0) {
-      result = maxVersions - htd.maxVersions;
-    }
-    
-    if(result == 0) {
-      result = families.size() - htd.families.size();
+      result = families.size() - other.families.size();
     }
     
     if(result == 0) {
-      Iterator<Text> it2 = htd.families.iterator();
-      for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
-        Text family1 = it.next();
-        Text family2 = it2.next();
-        result = family1.compareTo(family2);
+      for(Iterator<HColumnDescriptor> it = families.values().iterator(),
+          it2 = other.families.values().iterator(); it.hasNext(); ) {
+        result = it.next().compareTo(it2.next());
         if(result != 0) {
-          return result;
+          break;
         }
       }
     }
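
With maxVersions moved off the table and onto each column family, building a
descriptor now looks like the sketch below (illustrative only, not part of
this commit; the six-argument HColumnDescriptor constructor is the one
TestToString exercises further down):

import org.apache.hadoop.io.Text;

public class DescriptorSketch {
  public static void main(String[] args) {
    HTableDescriptor desc = new HTableDescriptor("example");

    // Per-family defaults, as TestToString's expected string shows below:
    // max versions 3, no compression, not in memory, no bloom filter.
    desc.addFamily(new HColumnDescriptor("contents:"));

    // Fully specified family: name, max versions, compression type,
    // in-memory flag, max value length, bloom filter flag.
    desc.addFamily(new HColumnDescriptor(new Text("anchor:"), 10,
        HColumnDescriptor.CompressionType.BLOCK, true, 1000, false));

    System.out.println(desc);
  }
}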

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java?view=auto&rev=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/InvalidColumnNameException.java Tue May 29 10:17:44 2007
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class InvalidColumnNameException extends IOException {
+  private static final long serialVersionUID = (1L << 29) - 1L;
+  public InvalidColumnNameException() {
+    super();
+  }
+
+  public InvalidColumnNameException(String s) {
+    super(s);
+  }
+}

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java?view=auto&rev=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/MasterNotRunningException.java Tue May 29 10:17:44 2007
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class MasterNotRunningException extends IOException {
+  private static final long serialVersionUID = (1L << 23) - 1L;
+  public MasterNotRunningException() {
+    super();
+  }
+
+  public MasterNotRunningException(String s) {
+    super(s);
+  }
+}

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java?view=auto&rev=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/NoServerForRegionException.java Tue May 29 10:17:44 2007
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class NoServerForRegionException extends IOException {
+  private static final long serialVersionUID = (1L << 11) - 1L;
+
+  public NoServerForRegionException() {
+    super();
+  }
+
+  public NoServerForRegionException(String s) {
+    super(s);
+  }
+}

Added: lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java?view=auto&rev=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/java/org/apache/hadoop/hbase/TableNotDisabledException.java Tue May 29 10:17:44 2007
@@ -0,0 +1,29 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+public class TableNotDisabledException extends IOException {
+  private static final long serialVersionUID = (1L << 19) - 1L;
+  public TableNotDisabledException() {
+    super();
+  }
+
+  public TableNotDisabledException(String s) {
+    super(s);
+  }
+}
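
All four exceptions added above extend IOException, so existing callers that
catch IOException keep working; clients that care can catch the specific
types. A hedged sketch (we assume HClient.deleteColumn() declares
IOException, as its use in TestMasterAdmin below suggests):

import java.io.IOException;

import org.apache.hadoop.io.Text;

public class AdminErrorHandlingSketch {
  // Sketch only: attempt a schema change and surface the new, more
  // specific failure mode separately from generic I/O trouble.
  static void tryDeleteColumn(HClient client, Text table, Text column) {
    try {
      client.deleteColumn(table, column);
    } catch (TableNotDisabledException e) {
      // The table must be disabled before columns can be deleted.
      System.err.println("disable " + table + " before deleting " + column);
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}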

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/EvaluationClient.java Tue May 29 10:17:44 2007
@@ -53,8 +53,8 @@
   private static HTableDescriptor tableDescriptor;
   
   static {
-    tableDescriptor = new HTableDescriptor("TestTable", 1);
-    tableDescriptor.addFamily(COLUMN_FAMILY);
+    tableDescriptor = new HTableDescriptor("TestTable");
+    tableDescriptor.addFamily(new HColumnDescriptor(COLUMN_FAMILY.toString()));
   }
   
   private static enum Test {RANDOM_READ,

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestGet.java Tue May 29 10:17:44 2007
@@ -71,9 +71,9 @@
       Path dir = new Path("/hbase");
       fs.mkdirs(dir);
       
-      HTableDescriptor desc = new HTableDescriptor("test", 1);
-      desc.addFamily(CONTENTS);
-      desc.addFamily(HConstants.COLUMN_FAMILY);
+      HTableDescriptor desc = new HTableDescriptor("test");
+      desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
+      desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
       
       HRegionInfo info = new HRegionInfo(0L, desc, null, null);
       Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHBaseCluster.java Tue May 29 10:17:44 2007
@@ -17,8 +17,8 @@
 
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.Set;
 import java.util.TreeMap;
-import java.util.TreeSet;
 
 import org.apache.hadoop.io.Text;
 
@@ -71,9 +71,9 @@
 
   private void setup() throws IOException {
     client = new HClient(conf);
-    desc = new HTableDescriptor("test", 3);
-    desc.addFamily(new Text(CONTENTS));
-    desc.addFamily(new Text(ANCHOR));
+    desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor(CONTENTS.toString()));
+    desc.addFamily(new HColumnDescriptor(ANCHOR.toString()));
     client.createTable(desc);
   }
       
@@ -182,7 +182,7 @@
     HTableDescriptor[] tables = client.listTables();
     assertEquals(1, tables.length);
     assertEquals(desc.getName(), tables[0].getName());
-    TreeSet<Text> families = tables[0].families();
+    Set<Text> families = tables[0].families().keySet();
     assertEquals(2, families.size());
     assertTrue(families.contains(new Text(CONTENTS)));
     assertTrue(families.contains(new Text(ANCHOR)));
@@ -193,11 +193,5 @@
     // Delete the table we created
 
     client.deleteTable(desc.getName());
-    try {
-      Thread.sleep(30000);                    // Wait for table to be deleted
-
-    } catch(InterruptedException e) {
-    }
   }
-  
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java Tue May 29 10:17:44 2007
@@ -102,9 +102,9 @@
     oldlogfile = new Path(parentdir, "oldlogfile");
 
     log = new HLog(fs, newlogdir, conf);
-    desc = new HTableDescriptor("test", 3);
-    desc.addFamily(new Text("contents:"));
-    desc.addFamily(new Text("anchor:"));
+    desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor("contents:"));
+    desc.addFamily(new HColumnDescriptor("anchor:"));
     region = new HRegion(parentdir, log, fs, conf, 
         new HRegionInfo(1, desc, null, null), null, oldlogfile);
   }

Added: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java?view=auto&rev=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java (added)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestMasterAdmin.java Tue May 29 10:17:44 2007
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.io.Text;
+
+public class TestMasterAdmin extends HBaseClusterTestCase {
+  private static final Text COLUMN_NAME = new Text("col1:");
+  private static HTableDescriptor testDesc;
+  static {
+    testDesc = new HTableDescriptor("testadmin1");
+    testDesc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
+  }
+  
+  private HClient client;
+
+  public TestMasterAdmin() {
+    super(true);
+    client = new HClient(conf);
+  }
+  
+  public void testMasterAdmin() {
+    try {
+      client.createTable(testDesc);
+      client.disableTable(testDesc.getName());
+      
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail();
+    }
+
+    try {
+      try {
+        client.openTable(testDesc.getName());
+
+      } catch(IllegalStateException e) {
+        // Expected
+      }
+
+      client.addColumn(testDesc.getName(), new HColumnDescriptor("col2:"));
+      client.enableTable(testDesc.getName());
+      try {
+        client.deleteColumn(testDesc.getName(), new Text("col2:"));
+        
+      } catch(TableNotDisabledException e) {
+        // Expected
+      }
+
+      client.disableTable(testDesc.getName());
+      client.deleteColumn(testDesc.getName(), new Text("col2:"));
+      
+    } catch(Exception e) {
+      e.printStackTrace();
+      fail();
+      
+    } finally {
+      try {
+        client.deleteTable(testDesc.getName());
+        
+      } catch(Exception e) {
+        e.printStackTrace();
+        fail();
+      }
+    }
+  }
+}

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestTable.java Tue May 29 10:17:44 2007
@@ -50,8 +50,8 @@
       fail();
     }
 
-    HTableDescriptor desc = new HTableDescriptor("test", 1);
-    desc.addFamily(HConstants.COLUMN_FAMILY);
+    HTableDescriptor desc = new HTableDescriptor("test");
+    desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY.toString()));
 
     try {
       client.createTable(desc);
@@ -73,5 +73,5 @@
       e.printStackTrace();
       fail();
     }
-}
+  }
 }

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java?view=diff&rev=542592&r1=542591&r2=542592
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestToString.java Tue May 29 10:17:44 2007
@@ -15,15 +15,24 @@
   }
   
   public void testHRegionInfo() throws Exception {
-    HTableDescriptor htd = new HTableDescriptor("hank", 10);
-    htd.addFamily(new Text("hankfamily:"));
-    htd.addFamily(new Text("hankotherfamily:"));
-    assertEquals("Table descriptor", htd.toString(),
-     "name: hank, maxVersions: 10, families: [hankfamily:, hankotherfamily:]");
+    HTableDescriptor htd = new HTableDescriptor("hank");
+    htd.addFamily(new HColumnDescriptor("hankfamily:"));
+    htd.addFamily(new HColumnDescriptor(new Text("hankotherfamily:"), 10,
+        HColumnDescriptor.CompressionType.BLOCK, true, 1000, false));
+    assertEquals("Table descriptor", "name: hank, families: "
+        + "{hankfamily:=(hankfamily:, max versions: 3, compression: none, "
+        + "in memory: false, max value length: 2147483647, bloom filter:false), "
+        + "hankotherfamily:=(hankotherfamily:, max versions: 10, "
+        + "compression: block, in memory: true, max value length: 1000, "
+        + "bloom filter:false)}", htd.toString());
     HRegionInfo hri = new HRegionInfo(-1, htd, new Text(), new Text("10"));
     assertEquals("HRegionInfo", 
-        "regionname: hank__-1, startKey: <>, tableDesc: {name: hank, " +
-        "maxVersions: 10, families: [hankfamily:, hankotherfamily:]}",
+        "regionname: hank__-1, startKey: <>, tableDesc: {name: hank, "
+        + "families: {hankfamily:=(hankfamily:, max versions: 3, "
+        + "compression: none, in memory: false, max value length: 2147483647, "
+        + "bloom filter:false), hankotherfamily:=(hankotherfamily:, "
+        + "max versions: 10, compression: block, in memory: true, max value "
+        + "length: 1000, bloom filter:false)}}",
         hri.toString());
   }
 }