Posted to commits@hbase.apache.org by st...@apache.org on 2008/08/29 07:11:58 UTC

svn commit: r690102 - in /hadoop/hbase/branches/0.2: ./ src/java/org/apache/hadoop/hbase/master/ src/java/org/apache/hadoop/hbase/regionserver/ src/java/org/apache/hadoop/hbase/rest/ src/java/org/apache/hadoop/hbase/util/

Author: stack
Date: Thu Aug 28 22:11:57 2008
New Revision: 690102

URL: http://svn.apache.org/viewvc?rev=690102&view=rev
Log:
HBASE-826 delete table followed by recreation results in honked table

Modified:
    hadoop/hbase/branches/0.2/CHANGES.txt
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/master/TableDelete.java
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/TableHandler.java
    hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
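
For context, the failure mode in HBASE-826 is a plain delete-then-recreate
sequence. A minimal sketch of that sequence against the 0.2 client API
follows; the table and family names are hypothetical and IOExceptions are
elided:

  HBaseConfiguration conf = new HBaseConfiguration();
  HBaseAdmin admin = new HBaseAdmin(conf);
  admin.disableTable("t");   // table must be offline before it can be deleted
  admin.deleteTable("t");
  // Recreate a table of the same name; before this fix the second "t"
  // could come up unusable ("honked").
  HTableDescriptor htd = new HTableDescriptor("t");
  htd.addFamily(new HColumnDescriptor(Bytes.toBytes("f:"),
    HColumnDescriptor.DEFAULT_VERSIONS, HColumnDescriptor.DEFAULT_COMPRESSION,
    HColumnDescriptor.DEFAULT_IN_MEMORY, HColumnDescriptor.DEFAULT_BLOCKCACHE,
    HColumnDescriptor.DEFAULT_LENGTH, HColumnDescriptor.DEFAULT_TTL,
    HColumnDescriptor.DEFAULT_BLOOMFILTER));
  admin.createTable(htd);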

Modified: hadoop/hbase/branches/0.2/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/CHANGES.txt?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.2/CHANGES.txt Thu Aug 28 22:11:57 2008
@@ -42,6 +42,7 @@
    HBASE-768   This message 'java.io.IOException: Install 0.1.x of hbase and run
                its migration first' is useless (Jean-Daniel Cryans via Jim
                Kellerman)
+   HBASE-826   delete table followed by recreation results in honked table
 
   IMPROVEMENTS
    HBASE-801  When a table haven't disable, shell could response in a "user
@@ -54,8 +55,7 @@
    HBASE-752  HBase 0.2.1 should be based on Hadoop 0.17.2
    HBASE-799  Deprecate all remaining methods that take Text as a parameter
               (Jean-Daniel Cryans via Jim Kellerman)
-   HBASE-849  Speed improvement in JenkinsHash (Andrzej Bialecki via Jim
-              Kellerman)
+   HBASE-849  Speed improvement in JenkinsHash (Andrzej Bialecki via Stack)
 
   NEW FEATURES
   OPTIMIZATIONS

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/master/TableDelete.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/master/TableDelete.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/master/TableDelete.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/master/TableDelete.java Thu Aug 28 22:11:57 2008
@@ -27,10 +27,8 @@
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableNotDisabledException;
-
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 
 /** 
  * Instantiated to delete a table. Table must be offline.

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java Thu Aug 28 22:11:57 2008
@@ -1287,9 +1287,10 @@
   /*
    * Get <code>versions</code> keys matching the origin key's
    * row/column/timestamp and those of an older vintage.
+   * Public so available when debugging.
    * @param origin Where to start searching.
-   * @param versions How many versions to return. Pass
-   * {@link HConstants.ALL_VERSIONS} to retrieve all.
+   * @param versions How many versions to return. Pass HConstants.ALL_VERSIONS
+   * to retrieve all.
    * @return Ordered list of <code>versions</code> keys going from newest back.
    * @throws IOException
    */
@@ -1483,7 +1484,7 @@
     checkReadOnly();
     Integer lid = obtainRowLock(row);
     try {
-      // Delete ALL verisons rather than MAX_VERSIONS.  If we just did
+      // Delete ALL versions rather than MAX_VERSIONS.  If we just did
       // MAX_VERSIONS, then if 2* MAX_VERSION cells, subsequent gets would
       // get old stuff.
       deleteMultiple(row, column, ts, ALL_VERSIONS);
@@ -1626,8 +1627,8 @@
     boolean flush = false;
     this.updatesLock.readLock().lock();
     try {
-      this.log.append(regionInfo.getRegionName(),
-        regionInfo.getTableDesc().getName(), updatesByColumn);
+      this.log.append(this.regionInfo.getRegionName(),
+        this.regionInfo.getTableDesc().getName(), updatesByColumn);
       long size = 0;
       for (Map.Entry<HStoreKey, byte[]> e: updatesByColumn.entrySet()) {
         HStoreKey key = e.getKey();
@@ -1684,12 +1685,14 @@
       this.conf, reporter);
   }
 
-  /*
-   * @param column
+  /**
+   * Return HStore instance.
+   * Use with caution.  Exposed for use of fixup utilities.
+   * @param column Name of column family hosted by this region.
    * @return Store that goes with the family on passed <code>column</code>.
    * TODO: Make this lookup faster.
    */
-  protected HStore getStore(final byte [] column) {
+  public HStore getStore(final byte [] column) {
     return this.stores.get(HStoreKey.getFamilyMapKey(column)); 
   }
   

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HStore.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HStore.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/HStore.java Thu Aug 28 22:11:57 2008
@@ -1172,7 +1172,7 @@
   /**
    * @return Array of readers ordered oldest to newest.
    */
-  MapFile.Reader [] getReaders() {
+  public MapFile.Reader [] getReaders() {
     return this.readers.values().
       toArray(new MapFile.Reader[this.readers.size()]);
   }
@@ -1306,9 +1306,8 @@
   }
 
   /**
-   * Get <code>versions</code> keys matching the origin key's
+   * Get <code>versions</code> of keys matching the origin key's
    * row/column/timestamp and those of an older vintage.
-   * Default access so can be accessed out of {@link HRegionServer}.
    * @param origin Where to start searching.
    * @param numVersions How many versions to return. Pass
    * {@link HConstants.ALL_VERSIONS} to retrieve all.
@@ -1316,7 +1315,7 @@
    * @return Matching keys.
    * @throws IOException
    */
-  List<HStoreKey> getKeys(final HStoreKey origin, final int versions,
+  protected List<HStoreKey> getKeys(final HStoreKey origin, final int versions,
     final long now)
   throws IOException {
     // This code below is very close to the body of the get method.  Any 
@@ -1334,7 +1333,7 @@
       }
       MapFile.Reader[] maparray = getReaders();
       // Returned array is sorted with the most recent addition last.
-      for(int i = maparray.length - 1;
+      for (int i = maparray.length - 1;
           i >= 0 && keys.size() < versions; i--) {
         MapFile.Reader map = maparray[i];
         synchronized(map) {
@@ -1352,10 +1351,9 @@
           do {
             // if the row matches, we might want this one.
             if (rowMatches(origin, readkey)) {
-              // if the cell matches, then we definitely want this key.
+              // if the cell address matches, then we definitely want this key.
               if (cellMatches(origin, readkey)) {
-                // Store the key if it isn't deleted or superceeded by what's
-                // in the memcache
+                // Store key if it isn't deleted or superseded by memcache
                 if (!HLogEdit.isDeleted(readval.get())) {
                   if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
                     keys.add(new HStoreKey(readkey));
@@ -1364,7 +1362,6 @@
                     break;
                   }
                 } else {
-                  // Is this copy necessary?
                   deletes.add(new HStoreKey(readkey));
                 }
               } else {
@@ -1411,8 +1408,6 @@
     // and columns that match those set on the scanner and which have delete
     // values.  If memory usage becomes an issue, could redo as bloom filter.
     Set<HStoreKey> deletes = new HashSet<HStoreKey>();
-    
-    
     this.lock.readLock().lock();
     try {
       // First go to the memcache.  Pick up deletes and candidates.
@@ -1426,7 +1421,8 @@
         rowAtOrBeforeFromMapFile(maparray[i], row, candidateKeys, deletes);
       }
       // Return the best key from candidateKeys
-      byte [] result = candidateKeys.isEmpty()? null: candidateKeys.lastKey().getRow();
+      byte [] result =
+        candidateKeys.isEmpty()? null: candidateKeys.lastKey().getRow();
       return result;
     } finally {
       this.lock.readLock().unlock();
@@ -1551,18 +1547,15 @@
         // as a candidate key
         if (Bytes.equals(readkey.getRow(), searchKey.getRow())) {
           if (!HLogEdit.isDeleted(readval.get())) {
-            if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
-              candidateKeys.put(stripTimestamp(readkey), 
-                  new Long(readkey.getTimestamp()));
+            if (handleNonDelete(readkey, now, deletes, candidateKeys)) {
               foundCandidate = true;
               // NOTE! Continue.
               continue;
             }
           }
-          // Deleted value.
-          deletes.add(readkey);
+          HStoreKey copy = addCopyToDeletes(readkey, deletes);
           if (deletedOrExpiredRow == null) {
-            deletedOrExpiredRow = new HStoreKey(readkey);
+            deletedOrExpiredRow = copy;
           }
         } else if (Bytes.compareTo(readkey.getRow(), searchKey.getRow()) > 0) {
           // if the row key we just read is beyond the key we're searching for,
@@ -1573,16 +1566,15 @@
           // we're seeking yet, so this row is a candidate for closest
           // (assuming that it isn't a delete).
           if (!HLogEdit.isDeleted(readval.get())) {
-            if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
-              candidateKeys.put(stripTimestamp(readkey), 
-                  new Long(readkey.getTimestamp()));
+            if (handleNonDelete(readkey, now, deletes, candidateKeys)) {
               foundCandidate = true;
+              // NOTE: Continue
               continue;
             }
           }
-          deletes.add(readkey);
+          HStoreKey copy = addCopyToDeletes(readkey, deletes);
           if (deletedOrExpiredRow == null) {
-            deletedOrExpiredRow = new HStoreKey(readkey);
+            deletedOrExpiredRow = copy;
           }
         }        
       } while(map.next(readkey, readval) && (knownNoGoodKey == null ||
@@ -1604,6 +1596,18 @@
     // through here.
   }
   
+  /*
+   * @param key Key to copy and add to <code>deletes</code>
+   * @param deletes
+   * @return Instance of the copy added to <code>deletes</code>
+   */
+  private HStoreKey addCopyToDeletes(final HStoreKey key,
+      final Set<HStoreKey> deletes) {
+    HStoreKey copy = new HStoreKey(key);
+    deletes.add(copy);
+    return copy;
+  }
+  
   private void rowAtOrBeforeWithCandidates(final HStoreKey startKey,
     final MapFile.Reader map, final byte[] row,
     final SortedMap<HStoreKey, Long> candidateKeys,
@@ -1632,58 +1636,81 @@
     }
 
     do {
-      HStoreKey strippedKey = null;
       // if we have an exact match on row, and it's not a delete, save this
       // as a candidate key
       if (Bytes.equals(readkey.getRow(), row)) {
-        strippedKey = stripTimestamp(readkey);
-        if (!HLogEdit.isDeleted(readval.get())) {
-          if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
-            candidateKeys.put(strippedKey,
-                new Long(readkey.getTimestamp()));
-          }
-        } else {
-          // If the candidate keys contain any that might match by timestamp,
-          // then check for a match and remove it if it's too young to 
-          // survive the delete 
-          if (candidateKeys.containsKey(strippedKey)) {
-            long bestCandidateTs =
-              candidateKeys.get(strippedKey).longValue();
-            if (bestCandidateTs <= readkey.getTimestamp()) {
-              candidateKeys.remove(strippedKey);
-            } 
-          }
-        }
+        handleKey(readkey, readval.get(), now, deletes, candidateKeys);
       } else if (Bytes.compareTo(readkey.getRow(), row) > 0 ) {
         // if the row key we just read is beyond the key we're searching for,
         // then we're done.
         break;
       } else {
-        strippedKey = stripTimestamp(readkey);
         // So, the row key doesn't match, but we haven't gone past the row
         // we're seeking yet, so this row is a candidate for closest 
         // (assuming that it isn't a delete).
-        if (!HLogEdit.isDeleted(readval.get())) {
-          if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
-            candidateKeys.put(strippedKey, Long.valueOf(readkey.getTimestamp()));
-          }
-        } else {
-          // If the candidate keys contain any that might match by timestamp,
-          // then check for a match and remove it if it's too young to 
-          // survive the delete 
-          if (candidateKeys.containsKey(strippedKey)) {
-            long bestCandidateTs = 
-              candidateKeys.get(strippedKey).longValue();
-            if (bestCandidateTs <= readkey.getTimestamp()) {
-              candidateKeys.remove(strippedKey);
-            } 
-          }
-        }      
+        handleKey(readkey, readval.get(), now, deletes, candidateKeys);
       }
     } while(map.next(readkey, readval));    
   }
   
   /*
+   * @param readkey
+   * @param now
+   * @param deletes
+   * @param candidateKeys
+   */
+  private void handleKey(final HStoreKey readkey, final byte [] value,
+      final long now, final Set<HStoreKey> deletes,
+      final SortedMap<HStoreKey, Long> candidateKeys) {
+    if (!HLogEdit.isDeleted(value)) {
+      handleNonDelete(readkey, now, deletes, candidateKeys);
+    } else {
+      // Pass a copy because readkey will change the next time next() is called.
+      handleDeleted(new HStoreKey(readkey), candidateKeys, deletes);
+    }
+  }
+  
+  /*
+   * @param readkey
+   * @param now
+   * @param deletes
+   * @param candidateKeys
+   * @return True if we added a candidate.
+   */
+  private boolean handleNonDelete(final HStoreKey readkey, final long now,
+      final Set<HStoreKey> deletes, final Map<HStoreKey, Long> candidateKeys) {
+    if (notExpiredAndNotInDeletes(this.ttl, readkey, now, deletes)) {
+      candidateKeys.put(stripTimestamp(readkey),
+        Long.valueOf(readkey.getTimestamp()));
+      return true;
+    }
+    return false;
+  }
+
+  /* Handle keys whose values hold deletes.
+   * Add to the set of deletes and then if the candidate keys contain any that
+   * might match by timestamp, then check for a match and remove it if it's too
+   * young to survive the delete 
+   * @param k Be careful; if key was gotten from a MapFile, pass in a copy.
+   * Values gotten by 'nexting' out of MapFiles will change in each invocation.
+   * @param candidateKeys
+   * @param deletes
+   */
+  static void handleDeleted(final HStoreKey k,
+      final SortedMap<HStoreKey, Long> candidateKeys,
+      final Set<HStoreKey> deletes) {
+    deletes.add(k);
+    HStoreKey strippedKey = stripTimestamp(k);
+    if (candidateKeys.containsKey(strippedKey)) {
+      long bestCandidateTs = 
+        candidateKeys.get(strippedKey).longValue();
+      if (bestCandidateTs <= k.getTimestamp()) {
+        candidateKeys.remove(strippedKey);
+      }
+    }
+  }
+
+  /*
    * @param mf MapFile to dig in.
    * @return Final key from passed <code>mf</code>
    * @throws IOException
@@ -1698,8 +1725,8 @@
     return new HStoreKey(key.getRow(), key.getColumn());
   }
     
-  /**
-   * Test that the <i>target</i> matches the <i>origin</i>. If the 
+  /*
+   * Test that the <i>target</i> matches the <i>origin</i> cell address. If the 
    * <i>origin</i> has an empty column, then it's assumed to mean any column 
    * matches and only match on row and timestamp. Otherwise, it compares the
    * keys with HStoreKey.matchesRowCol().
@@ -1708,7 +1735,7 @@
    */
   private boolean cellMatches(HStoreKey origin, HStoreKey target){
     // if the origin's column is empty, then we're matching any column
-    if (Bytes.equals(origin.getColumn(), HConstants.EMPTY_BYTE_ARRAY)){
+    if (Bytes.equals(origin.getColumn(), HConstants.EMPTY_BYTE_ARRAY)) {
       // if the row matches, then...
       if (Bytes.equals(target.getRow(), origin.getRow())) {
         // check the timestamp
@@ -1720,7 +1747,7 @@
     return target.matchesRowCol(origin);
   }
     
-  /**
+  /*
    * Test that the <i>target</i> matches the <i>origin</i>. If the <i>origin</i>
    * has an empty column, then it just tests row equivalence. Otherwise, it uses
    * HStoreKey.matchesRowCol().
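
The visibility changes in this commit (HRegion.getStore and HStore.getReaders
are now public) are there so fixup utilities can inspect a region's stores
directly. A hedged sketch of such a use; how the open HRegion is obtained
(e.g. via MetaUtils) is elided, and the family name passed in is hypothetical:

  // Dump the store-file reader count for one column family of an open region.
  static void dumpStoreFiles(final HRegion r, final String family) {
    HStore store = r.getStore(Bytes.toBytes(family)); // public as of this commit
    MapFile.Reader [] readers = store.getReaders();   // ordered oldest to newest
    System.out.println(family + ": " + readers.length + " store file(s)");
  }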

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/regionserver/Memcache.java Thu Aug 28 22:11:57 2008
@@ -309,7 +309,7 @@
         if (columns == null || columns.contains(itKey.getColumn())) {
           byte [] val = tailMap.get(itKey);
           if (HLogEdit.isDeleted(val)) {
-            if (!deletes.containsKey(itCol) 
+            if (!deletes.containsKey(itCol)
               || deletes.get(itCol).longValue() < itKey.getTimestamp()) {
               deletes.put(itCol, Long.valueOf(itKey.getTimestamp()));
             }
@@ -395,14 +395,13 @@
         found_key = key_iterator.next();
         if (Bytes.compareTo(found_key.getRow(), row) <= 0) {
           if (HLogEdit.isDeleted(tailMap.get(found_key))) {
-            handleDeleted(found_key, candidateKeys, deletes);
+            HStore.handleDeleted(found_key, candidateKeys, deletes);
             if (deletedOrExpiredRow == null) {
               deletedOrExpiredRow = found_key;
             }
           } else {
             if (HStore.notExpiredAndNotInDeletes(this.ttl, found_key, now, deletes)) {
-              HStoreKey strippedKey = stripTimestamp(found_key);
-              candidateKeys.put(strippedKey,
+              candidateKeys.put(stripTimestamp(found_key),
                 new Long(found_key.getTimestamp()));
             } else {
               if (deletedOrExpiredRow == null) {
@@ -493,7 +492,7 @@
       do {
         HStoreKey found_key = key_iterator.next();
         if (HLogEdit.isDeleted(thisRowTailMap.get(found_key))) {
-          handleDeleted(found_key, candidateKeys, deletes);
+          HStore.handleDeleted(found_key, candidateKeys, deletes);
         } else {
           if (ttl == HConstants.FOREVER ||
               now < found_key.getTimestamp() + ttl ||
@@ -511,20 +510,6 @@
       } while (key_iterator.hasNext());
     }
   }
-
-  private void handleDeleted(final HStoreKey k,
-      final SortedMap<HStoreKey, Long> candidateKeys,
-      final Set<HStoreKey> deletes) {
-    deletes.add(k);
-    HStoreKey strippedKey = stripTimestamp(k);
-    if (candidateKeys.containsKey(strippedKey)) {
-      long bestCandidateTs = 
-        candidateKeys.get(strippedKey).longValue();
-      if (bestCandidateTs <= k.getTimestamp()) {
-        candidateKeys.remove(strippedKey);
-      }
-    }
-  }
   
   static HStoreKey stripTimestamp(HStoreKey key) {
     return new HStoreKey(key.getRow(), key.getColumn());
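
Both rowAtOrBefore paths in Memcache now delegate to the single
HStore.handleDeleted: record the tombstone, then evict any candidate for the
same row/column whose timestamp is not newer than the delete's. A toy
illustration, assuming caller code in the same regionserver package and an
HStoreKey(row, column, timestamp) constructor; keys and timestamps made up:

  SortedMap<HStoreKey, Long> candidateKeys = new TreeMap<HStoreKey, Long>();
  Set<HStoreKey> deletes = new HashSet<HStoreKey>();
  byte [] row = Bytes.toBytes("row1");
  byte [] col = Bytes.toBytes("info:a");
  candidateKeys.put(new HStoreKey(row, col), Long.valueOf(10L));
  // A delete at timestamp 20 covers the timestamp-10 candidate above...
  HStore.handleDeleted(new HStoreKey(row, col, 20L), candidateKeys, deletes);
  // ...so candidateKeys is now empty and deletes holds the tombstone key.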

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/Dispatcher.java Thu Aug 28 22:11:57 2008
@@ -22,7 +22,6 @@
 import java.io.IOException;
 
 import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
@@ -63,6 +62,7 @@
   private static final Log LOG = LogFactory.getLog(Dispatcher.class.getName());
   private MetaHandler metaHandler;
   private TableHandler tableHandler;
+  private RowHandler rowHandler;
   private ScannerHandler scannerHandler;
 
   private static final String SCANNER = "scanner";
@@ -85,6 +85,7 @@
       admin = new HBaseAdmin(conf);
       metaHandler = new MetaHandler(conf, admin);
       tableHandler = new TableHandler(conf, admin);
+      rowHandler = new RowHandler(conf, admin);
       scannerHandler = new ScannerHandler(conf, admin);
     } catch(Exception e){
       throw new ServletException(e);
@@ -100,9 +101,14 @@
       // the entire instance.
       metaHandler.doGet(request, response, pathSegments);
     } else {
-      // otherwise, it must be a GET request suitable for the
-      // table handler.
-      tableHandler.doGet(request, response, pathSegments);
+      if (pathSegments.length >= 2 && pathSegments[0].length() > 0 && pathSegments[1].toLowerCase().equals(ROW)) {
+        // if it has table name and row path segments
+        rowHandler.doGet(request, response, pathSegments);
+      } else {
+        // otherwise, it must be a GET request suitable for the
+        // table handler.
+        tableHandler.doGet(request, response, pathSegments);
+      }
     }
   }
 
@@ -110,15 +116,27 @@
   throws IOException, ServletException {
     String [] pathSegments = getPathSegments(request);
     
-    // there should be at least two path segments (table name and row or scanner)
-    if (pathSegments.length >= 2 && pathSegments[0].length() > 0) {
-      if (pathSegments[1].toLowerCase().equals(SCANNER) &&
-          pathSegments.length >= 2) {
-        scannerHandler.doPost(request, response, pathSegments);
-        return;
-      } else if (pathSegments[1].toLowerCase().equals(ROW) && pathSegments.length >= 3) {
-        tableHandler.doPost(request, response, pathSegments);
-        return;
+    if (pathSegments.length == 0 || pathSegments[0].length() <= 0) {
+      // if it was a root request, it must be a create table request
+      tableHandler.doPost(request, response, pathSegments);
+      return;
+    } else {
+      // there should be at least two path segments (table name and row or
+      // scanner or disable/enable operation)
+      if (pathSegments.length >= 2 && pathSegments[0].length() > 0) {
+        if (pathSegments[1].toLowerCase().equals(SCANNER)
+            && pathSegments.length >= 2) {
+          scannerHandler.doPost(request, response, pathSegments);
+          return;
+        } else if (pathSegments[1].toLowerCase().equals(ROW)
+            && pathSegments.length >= 3) {
+          rowHandler.doPost(request, response, pathSegments);
+          return;
+        } else if ((pathSegments[1].toLowerCase().equals(TableHandler.DISABLE) || pathSegments[1].toLowerCase().equals(TableHandler.ENABLE))
+            && pathSegments.length == 2) {
+          tableHandler.doPost(request, response, pathSegments);
+          return;
+        }
       }
     }
 
@@ -129,8 +147,15 @@
 
   protected void doPut(HttpServletRequest request, HttpServletResponse response)
   throws ServletException, IOException {
-    // Equate PUT with a POST.
-    doPost(request, response);
+    String [] pathSegments = getPathSegments(request);
+    
+    if (pathSegments.length == 1 && pathSegments[0].length() > 0) {
+      // if it has only the table name
+      tableHandler.doPut(request, response, pathSegments);
+    } else {
+      // Equate PUT with a POST.
+      doPost(request, response);
+    }
   }
 
   protected void doDelete(HttpServletRequest request,
@@ -138,18 +163,22 @@
   throws IOException, ServletException {
     String [] pathSegments = getPathSegments(request);
     
-    // must be at least two path segments (table name and row or scanner)
-    if (pathSegments.length >= 2 && pathSegments[0].length() > 0) {
-      // DELETE to a scanner requires at least three path segments
+    if (pathSegments.length == 1 && pathSegments[0].length() > 0) {
+      // if it has only the table name
+      tableHandler.doDelete(request, response, pathSegments);
+      return;
+    } else if (pathSegments.length >= 3 && pathSegments[0].length() > 0) {
+      // need at least three path segments (table name plus row or scanner)
       if (pathSegments[1].toLowerCase().equals(SCANNER) &&
           pathSegments.length == 3 && pathSegments[2].length() > 0) {
+        // DELETE to a scanner requires at least three path segments
         scannerHandler.doDelete(request, response, pathSegments);
         return;
       } else if (pathSegments[1].toLowerCase().equals(ROW) &&
           pathSegments.length >= 3) {
-        tableHandler.doDelete(request, response, pathSegments);
+        rowHandler.doDelete(request, response, pathSegments);
         return;
-      } 
+      }
     }
     
     // if we reach this point, then no handler exists for this request.
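
Net effect of the Dispatcher changes, sketched as the routing it now
implements (paths are illustrative; the scanner routes are unchanged):

  GET    /                      -> MetaHandler.doGet
  GET    /<table>/row/<row>     -> RowHandler.doGet
  GET    /<table>[/regions]     -> TableHandler.doGet
  POST   /                      -> TableHandler.doPost  (create table from XML)
  POST   /<table>/enable        -> TableHandler.doPost  (admin.enableTable)
  POST   /<table>/disable       -> TableHandler.doPost  (admin.disableTable)
  POST   /<table>/row/<row>     -> RowHandler.doPost
  POST   /<table>/scanner       -> ScannerHandler.doPost
  PUT    /<table>               -> TableHandler.doPut   (update schema from XML)
  DELETE /<table>               -> TableHandler.doDelete (admin.deleteTable)
  DELETE /<table>/row/<row>     -> RowHandler.doDelete
  DELETE /<table>/scanner/<id>  -> ScannerHandler.doDelete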

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/MetaHandler.java Thu Aug 28 22:11:57 2008
@@ -35,7 +35,7 @@
 
 /**
  * MetaHandler fields all requests for metadata at the instance level. At the
- * momment this is only GET requests to /.
+ * moment this is only GET requests to /.
  */
 public class MetaHandler extends GenericHandler {
 

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/ScannerHandler.java Thu Aug 28 22:11:57 2008
@@ -23,8 +23,6 @@
 import java.net.URLDecoder;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.SortedMap;
-import java.util.TreeMap;
 
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
@@ -39,7 +37,6 @@
 import org.apache.hadoop.hbase.util.JenkinsHash;
 import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.RowResult;
-import org.apache.hadoop.io.Text;
 import org.mortbay.servlet.MultiPartResponse;
 import org.znerd.xmlenc.XMLOutputter;
 

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/TableHandler.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/TableHandler.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/TableHandler.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/rest/TableHandler.java Thu Aug 28 22:11:57 2008
@@ -21,12 +21,6 @@
 
 import java.io.IOException;
 import java.io.PrintWriter;
-import java.net.URLDecoder;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
 
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
@@ -36,14 +30,11 @@
 
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.io.BatchUpdate;
-import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.Text;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.Node;
@@ -57,7 +48,9 @@
  * this handler.
  */
 public class TableHandler extends GenericHandler {
-     
+  public static final String DISABLE = "disable";
+  public static final String ENABLE = "enable";
+  
   public TableHandler(HBaseConfiguration conf, HBaseAdmin admin) 
   throws ServletException{
     super(conf, admin);
@@ -76,10 +69,6 @@
         // get a region list
         getTableRegions(table, request, response);
       }
-      else if (pathSegments[1].toLowerCase().equals(ROW)) {
-        // get a row
-        getRow(table, request, response, pathSegments);
-      }
       else{
         doNotFound(response, "Not handled in TableHandler");
       }
@@ -89,252 +78,31 @@
   public void doPost(HttpServletRequest request, HttpServletResponse response, 
     String[] pathSegments)
   throws ServletException, IOException {
-    putRow(request, response, pathSegments);
+    if (pathSegments.length == 0 || pathSegments[0].length() <= 0) {
+      // if it's a creation operation
+      putTable(request, response, pathSegments);
+    } else {
+      // if it's a disable operation or enable operation
+      String tableName = pathSegments[0];      
+      if (pathSegments[1].toLowerCase().equals(DISABLE)) {
+        admin.disableTable(tableName);
+      } else if (pathSegments[1].toLowerCase().equals(ENABLE)) {
+        admin.enableTable(tableName);
+      }
+      response.setStatus(202);
+    }
   }
-  
+
   public void doPut(HttpServletRequest request, HttpServletResponse response, 
     String[] pathSegments)
   throws ServletException, IOException {
-    doPost(request, response, pathSegments);
+    updateTable(request, response, pathSegments);
   }
   
   public void doDelete(HttpServletRequest request, HttpServletResponse response, 
     String[] pathSegments)
   throws ServletException, IOException {
-    deleteRow(request, response, pathSegments);
-  }
-  
-  /*
-   * @param request
-   * @param response
-   * @param pathSegments info path split on the '/' character.  First segment
-   * is the tablename, second is 'row', and third is the row id.
-   * @throws IOException
-   * Retrieve a row in one of several output formats.
-   */
-  private void getRow(HTable table, final HttpServletRequest request,
-    final HttpServletResponse response, final String [] pathSegments)
-  throws IOException {
-    // pull the row key out of the path
-    String row = URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING);
-
-    String timestampStr = null;
-    if (pathSegments.length == 4) {
-      // A timestamp has been supplied.
-      timestampStr = pathSegments[3];
-      if (timestampStr.equals("timestamps")) {
-        // Not supported in hbase just yet. TODO
-        doMethodNotAllowed(response, "Not yet supported by hbase");
-        return;
-      }
-    }
-    
-    String[] columns = request.getParameterValues(COLUMN);
-        
-    if (columns == null || columns.length == 0) {
-      // They want full row returned. 
-
-      // Presumption is that this.table has already been focused on target table.
-      Map<byte [], Cell> result = timestampStr == null ? 
-        table.getRow(Bytes.toBytes(row)) 
-        : table.getRow(Bytes.toBytes(row), Long.parseLong(timestampStr));
-        
-      if (result == null || result.size() == 0) {
-        doNotFound(response, "Row not found!");
-      } else {
-        switch (ContentType.getContentType(request.getHeader(ACCEPT))) {
-        case XML:
-          outputRowXml(response, result);
-          break;
-        case MIME:
-        default:
-          doNotAcceptable(response, "Unsupported Accept Header Content: " +
-            request.getHeader(CONTENT_TYPE));
-        }
-      }
-    } else {
-      Map<byte [], Cell> prefiltered_result = table.getRow(Bytes.toBytes(row));
-    
-      if (prefiltered_result == null || prefiltered_result.size() == 0) {
-        doNotFound(response, "Row not found!");
-      } else {
-        // create a Set from the columns requested so we can
-        // efficiently filter the actual found columns
-        Set<String> requested_columns_set = new HashSet<String>();
-        for(int i = 0; i < columns.length; i++){
-          requested_columns_set.add(columns[i]);
-        }
-  
-        // output map that will contain the filtered results
-        Map<byte [], Cell> m =
-          new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
-
-        // get an array of all the columns retrieved
-        Set<byte []> columns_retrieved = prefiltered_result.keySet();
-
-        // copy over those cells with requested column names
-        for(byte [] current_column: columns_retrieved) {
-          if (requested_columns_set.contains(Bytes.toString(current_column))) {
-            m.put(current_column, prefiltered_result.get(current_column));            
-          }
-        }
-        
-        switch (ContentType.getContentType(request.getHeader(ACCEPT))) {
-          case XML:
-            outputRowXml(response, m);
-            break;
-          case MIME:
-          default:
-            doNotAcceptable(response, "Unsupported Accept Header Content: " +
-              request.getHeader(CONTENT_TYPE));
-        }
-      }
-    }
-  }
-  
-  /*
-   * Output a row encoded as XML.
-   * @param response
-   * @param result
-   * @throws IOException
-   */
-  private void outputRowXml(final HttpServletResponse response,
-      final Map<byte [], Cell> result)
-  throws IOException {
-    setResponseHeader(response, result.size() > 0? 200: 204,
-        ContentType.XML.toString());
-    XMLOutputter outputter = getXMLOutputter(response.getWriter());
-    outputter.startTag(ROW);
-    outputColumnsXml(outputter, result);
-    outputter.endTag();
-    outputter.endDocument();
-    outputter.getWriter().close();
-  }
-
-  /*
-   * @param response
-   * @param result
-   * Output the results contained in result as a multipart/related response.
-   */
-  // private void outputRowMime(final HttpServletResponse response,
-  //     final Map<Text, Cell> result)
-  // throws IOException {
-  //   response.setStatus(result.size() > 0? 200: 204);
-  //   // This code ties me to the jetty server.
-  //   MultiPartResponse mpr = new MultiPartResponse(response);
-  //   // Content type should look like this for multipart:
-  //   // Content-type: multipart/related;start="<ro...@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
-  //   String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
-  //     mpr.getBoundary() + "\"";
-  //   // Setting content type is broken.  I'm unable to set parameters on the
-  //   // content-type; They get stripped.  Can't set boundary, etc.
-  //   // response.addHeader("Content-Type", ct);
-  //   response.setContentType(ct);
-  //   outputColumnsMime(mpr, result);
-  //   mpr.close();
-  // }
-  
-  /*
-   * @param request
-   * @param response
-   * @param pathSegments
-   * Do a put based on the client request.
-   */
-  private void putRow(final HttpServletRequest request,
-    final HttpServletResponse response, final String [] pathSegments)
-  throws IOException, ServletException {
-    HTable table = getTable(pathSegments[0]);
-
-    // pull the row key out of the path
-    String row = URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING);
-    
-    switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) {
-      case XML:
-        putRowXml(table, row, request, response, pathSegments);
-        break;
-      case MIME:
-        doNotAcceptable(response, "Don't support multipart/related yet...");
-        break;
-      default:
-        doNotAcceptable(response, "Unsupported Accept Header Content: " +
-          request.getHeader(CONTENT_TYPE));
-    }
-  }
-
-  /*
-   * @param request
-   * @param response
-   * @param pathSegments
-   * Decode supplied XML and do a put to Hbase.
-   */
-  private void putRowXml(HTable table, String row, 
-    final HttpServletRequest request, final HttpServletResponse response, 
-    final String [] pathSegments)
-  throws IOException, ServletException{
-
-    DocumentBuilderFactory docBuilderFactory 
-      = DocumentBuilderFactory.newInstance();  
-    //ignore all comments inside the xml file
-    docBuilderFactory.setIgnoringComments(true);
-
-    DocumentBuilder builder = null;
-    Document doc = null;
-    
-    String timestamp = pathSegments.length >= 4 ? pathSegments[3] : null;
-    
-    try{
-      builder = docBuilderFactory.newDocumentBuilder();
-      doc = builder.parse(request.getInputStream());
-    } catch (javax.xml.parsers.ParserConfigurationException e) {
-      throw new ServletException(e);
-    } catch (org.xml.sax.SAXException e){
-      throw new ServletException(e);
-    }
-
-    BatchUpdate batchUpdate;
-    
-    try{
-      // start an update
-      batchUpdate = timestamp == null ? 
-        new BatchUpdate(row) : new BatchUpdate(row, Long.parseLong(timestamp));
-
-      // set the columns from the xml
-      NodeList columns = doc.getElementsByTagName("column");
-
-      for(int i = 0; i < columns.getLength(); i++){
-        // get the current column element we're working on
-        Element column = (Element)columns.item(i);
-
-        // extract the name and value children
-        Node name_node = column.getElementsByTagName("name").item(0);
-        String name = name_node.getFirstChild().getNodeValue();
-
-        Node value_node = column.getElementsByTagName("value").item(0);
-
-        byte[] value = new byte[0];
-        
-        // for some reason there's no value here. probably indicates that
-        // the consumer passed a null as the cell value.
-        if(value_node.getFirstChild() != null && 
-          value_node.getFirstChild().getNodeValue() != null){
-          // decode the base64'd value
-          value = org.apache.hadoop.hbase.util.Base64.decode(
-            value_node.getFirstChild().getNodeValue());
-        }
-
-        // put the value
-        batchUpdate.put(name, value);
-      }
-
-      // commit the update
-      table.commit(batchUpdate);
-      
-      // respond with a 200
-      response.setStatus(200);      
-    }
-    catch(Exception e){
-      throw new ServletException(e);
-    }
+    deleteTable(request, response, pathSegments);
   }
 
   /*
@@ -438,41 +206,158 @@
     }
   }
   
-  /*
-   * @param request
-   * @param response
-   * @param pathSegments
-   * Delete some or all cells for a row.
-   */
-   private void deleteRow(final HttpServletRequest request,
-    final HttpServletResponse response, final String [] pathSegments)
+  private void putTable(HttpServletRequest request,
+    HttpServletResponse response, String[] pathSegments) 
+  throws IOException, ServletException {
+    switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) {
+      case XML:
+        putTableXml(request, response, pathSegments);
+        break;
+      case MIME:
+        doNotAcceptable(response, "Don't support multipart/related yet...");
+        break;
+      default:
+        doNotAcceptable(response, "Unsupported Accept Header Content: " +
+            request.getHeader(CONTENT_TYPE));
+    }
+  } 
+  
+  private void updateTable(HttpServletRequest request,
+    HttpServletResponse response, String[] pathSegments) 
+  throws IOException, ServletException {
+    switch(ContentType.getContentType(request.getHeader(CONTENT_TYPE))) {
+      case XML:
+        updateTableXml(request, response, pathSegments);
+        break;
+      case MIME:
+        doNotAcceptable(response, "Don't support multipart/related yet...");
+        break;
+      default:
+        doNotAcceptable(response, "Unsupported Accept Header Content: " +
+            request.getHeader(CONTENT_TYPE));
+    }
+  }
+  
+  private void deleteTable(HttpServletRequest request,
+      HttpServletResponse response, String[] pathSegments) throws IOException {
+    String tableName = pathSegments[0];
+    admin.deleteTable(tableName);
+    response.setStatus(202);
+  }  
+  
+  private void putTableXml(HttpServletRequest 
+    request, HttpServletResponse response, String[] pathSegments)
   throws IOException, ServletException {
-    // grab the table we're operating on
-    HTable table = getTable(getTableName(pathSegments));
+    DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory
+        .newInstance();
+    // ignore all comments inside the xml file
+    docBuilderFactory.setIgnoringComments(true);
+
+    DocumentBuilder builder = null;
+    Document doc = null;
+
+    try {
+      builder = docBuilderFactory.newDocumentBuilder();
+      doc = builder.parse(request.getInputStream());
+    } catch (javax.xml.parsers.ParserConfigurationException e) {
+      throw new ServletException(e);
+    } catch (org.xml.sax.SAXException e) {
+      throw new ServletException(e);
+    }
     
-    // pull the row key out of the path
-    String row = URLDecoder.decode(pathSegments[2], HConstants.UTF8_ENCODING);
+    try {
+      Node name_node = doc.getElementsByTagName("name").item(0);
+      String table_name = name_node.getFirstChild().getNodeValue();
+      
+      HTableDescriptor htd = new HTableDescriptor(table_name);
+      NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily");
+      for (int i = 0; i < columnfamily_nodes.getLength(); i++) {
+        Element columnfamily = (Element)columnfamily_nodes.item(i);
+        htd.addFamily(putColumnFamilyXml(columnfamily));
+      }
+      admin.createTable(htd);      
+    } catch (Exception e) {
+      throw new ServletException(e);
+    }
+  }
 
-    String[] columns = request.getParameterValues(COLUMN);
-        
-    // hack - we'll actually test for the presence of the timestamp parameter
-    // eventually
-    boolean timestamp_present = false;
-    if(timestamp_present){ // do a timestamp-aware delete
-      doMethodNotAllowed(response, "DELETE with a timestamp not implemented!");
-    }
-    else{ // ignore timestamps
-      if(columns == null || columns.length == 0){
-        // retrieve all the columns
-        doMethodNotAllowed(response,
-          "DELETE without specified columns not implemented!");
-      } else{
-        // delete each column in turn      
-        for(int i = 0; i < columns.length; i++){
-          table.deleteAll(row, columns[i]);
-        }
+  private void updateTableXml(HttpServletRequest request,
+      HttpServletResponse response, String[] pathSegments) throws IOException,
+      ServletException {
+    DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory
+        .newInstance();
+    // ignore all comments inside the xml file
+    docBuilderFactory.setIgnoringComments(true);
+
+    DocumentBuilder builder = null;
+    Document doc = null;
+
+    try {
+      builder = docBuilderFactory.newDocumentBuilder();
+      doc = builder.parse(request.getInputStream());
+    } catch (javax.xml.parsers.ParserConfigurationException e) {
+      throw new ServletException(e);
+    } catch (org.xml.sax.SAXException e) {
+      throw new ServletException(e);
+    }
+
+    try {
+      String tableName = pathSegments[0];
+
+      NodeList columnfamily_nodes = doc.getElementsByTagName("columnfamily");
+      for (int i = 0; i < columnfamily_nodes.getLength(); i++) {
+        Element columnfamily = (Element) columnfamily_nodes.item(i);
+        HColumnDescriptor hcd = putColumnFamilyXml(columnfamily);
+        admin.modifyColumn(tableName, hcd.getNameAsString(), hcd);
       }
-      response.setStatus(202);
+    } catch (Exception e) {
+      throw new ServletException(e);
     }
-  } 
+  }
+  
+  private HColumnDescriptor putColumnFamilyXml(Element columnfamily) {
+    Node name_node = columnfamily.getElementsByTagName("name").item(0);
+    String colname = name_node.getFirstChild().getNodeValue();
+    
+    if (colname.indexOf(":") == -1) {
+      colname += ":";
+    }
+    
+    int max_versions = HColumnDescriptor.DEFAULT_VERSIONS;
+    NodeList max_versions_list = columnfamily.getElementsByTagName("max-versions");
+    if (max_versions_list.getLength() > 0) {
+      max_versions = Integer.parseInt(max_versions_list.item(0).getFirstChild().getNodeValue());
+    }
+    CompressionType compression = HColumnDescriptor.DEFAULT_COMPRESSION;
+    NodeList compression_list = columnfamily.getElementsByTagName("compression");
+    if (compression_list.getLength() > 0) {
+      compression = CompressionType.valueOf(compression_list.item(0).getFirstChild().getNodeValue());
+    }
+    boolean in_memory = HColumnDescriptor.DEFAULT_IN_MEMORY;
+    NodeList in_memory_list = columnfamily.getElementsByTagName("in-memory");
+    if (in_memory_list.getLength() > 0) {
+      in_memory = Boolean.valueOf(in_memory_list.item(0).getFirstChild().getNodeValue());
+    }
+    boolean block_cache = HColumnDescriptor.DEFAULT_BLOCKCACHE;
+    NodeList block_cache_list = columnfamily.getElementsByTagName("block-cache");
+    if (block_cache_list.getLength() > 0) {
+      block_cache = Boolean.valueOf(block_cache_list.item(0).getFirstChild().getNodeValue());
+    }
+    int max_cell_size = HColumnDescriptor.DEFAULT_LENGTH;
+    NodeList max_cell_size_list = columnfamily.getElementsByTagName("max-cell-size");
+    if (max_cell_size_list.getLength() > 0) {
+      max_cell_size = Integer.valueOf(max_cell_size_list.item(0).getFirstChild().getNodeValue());
+    }
+    int ttl = HColumnDescriptor.DEFAULT_TTL;
+    NodeList ttl_list = columnfamily.getElementsByTagName("time-to-live");
+    if (ttl_list.getLength() > 0) {
+      ttl = Integer.valueOf(ttl_list.item(0).getFirstChild().getNodeValue());
+    }
+    boolean bloomfilter = HColumnDescriptor.DEFAULT_BLOOMFILTER;
+    NodeList bloomfilter_list = columnfamily.getElementsByTagName("bloomfilter");
+    if (bloomfilter_list.getLength() > 0) {
+      bloomfilter = Boolean.valueOf(bloomfilter_list.item(0).getFirstChild().getNodeValue());
+    }
+    return new HColumnDescriptor(Bytes.toBytes(colname), max_versions, compression, in_memory, block_cache, max_cell_size, ttl, bloomfilter);
+  }
 }
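
For reference, the XML that putTableXml and putColumnFamilyXml expect looks
roughly like the following. The element names come from the parsing code
above; the root element name is not checked, and all values shown are
illustrative:

  <table>
    <name>mytable</name>
    <columnfamily>
      <name>myfamily</name>
      <max-versions>3</max-versions>
      <compression>NONE</compression>
      <in-memory>false</in-memory>
      <block-cache>true</block-cache>
      <max-cell-size>2147483647</max-cell-size>
      <time-to-live>-1</time-to-live>
      <bloomfilter>false</bloomfilter>
    </columnfamily>
  </table>

Every child element other than <name> is optional and falls back to the
corresponding HColumnDescriptor default.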

Modified: hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=690102&r1=690101&r2=690102&view=diff
==============================================================================
--- hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hadoop/hbase/branches/0.2/src/java/org/apache/hadoop/hbase/util/MetaUtils.java Thu Aug 28 22:11:57 2008
@@ -82,15 +82,9 @@
    * @throws IOException
    */
   private void initialize() throws IOException {
-    this.fs = FileSystem.get(this.conf);              // get DFS handle
+    this.fs = FileSystem.get(this.conf);
     // Get root directory of HBase installation
-    this.rootdir = fs.makeQualified(new Path(this.conf.get(HConstants.HBASE_DIR)));
-    if (!fs.exists(rootdir)) {
-      String message = "HBase root directory " + rootdir.toString() +
-        " does not exist.";
-      LOG.error(message);
-      throw new FileNotFoundException(message);
-    }
+    this.rootdir = FSUtils.getRootDir(this.conf);
   }
 
   /** @return the HLog