Posted to commits@hbase.apache.org by st...@apache.org on 2011/06/08 02:03:57 UTC

svn commit: r1133209 [2/3] - in /hbase/trunk: ./ bin/ src/main/java/org/apache/hadoop/hbase/ src/main/java/org/apache/hadoop/hbase/catalog/ src/main/java/org/apache/hadoop/hbase/client/ src/main/java/org/apache/hadoop/hbase/io/ src/main/java/org/apache...

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Wed Jun  8 00:03:54 2011
@@ -50,10 +50,7 @@ import java.util.concurrent.locks.Reentr
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.*;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -199,6 +196,7 @@ public class HRegion implements HeapSize
   final Path regiondir;
   KeyValue.KVComparator comparator;
 
+
   /*
    * Data structure of write state flags used coordinating flushes,
    * compactions and closes.
@@ -239,11 +237,11 @@ public class HRegion implements HeapSize
 
   final WriteState writestate = new WriteState();
 
-  final long memstoreFlushSize;
+  long memstoreFlushSize;
   private volatile long lastFlushTime;
   final RegionServerServices rsServices;
   private List<Pair<Long, Long>> recentFlushes = new ArrayList<Pair<Long,Long>>();
-  private final long blockingMemStoreSize;
+  private long blockingMemStoreSize;
   final long threadWakeFrequency;
   // Used to guard closes
   final ReentrantReadWriteLock lock =
@@ -265,6 +263,8 @@ public class HRegion implements HeapSize
    * Name of the region info file that resides just under the region directory.
    */
   public final static String REGIONINFO_FILE = ".regioninfo";
+  private HTableDescriptor htableDescriptor = null;
+
 
   /**
    * Should only be used for testing purposes
@@ -304,7 +304,8 @@ public class HRegion implements HeapSize
    * is new), then read them from the supplied path.
    * @param rsServices reference to {@link RegionServerServices} or null
    *
-   * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
+   * @see HRegion#newHRegion(Path, HLog,
+   * FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester)
    */
   public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf,
       HRegionInfo regionInfo, RegionServerServices rsServices) {
@@ -319,14 +320,14 @@ public class HRegion implements HeapSize
         10 * 1000);
     String encodedNameStr = this.regionInfo.getEncodedName();
     this.regiondir = getRegionDir(this.tableDir, encodedNameStr);
-    long flushSize = regionInfo.getTableDesc().getMemStoreFlushSize();
-    if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) {
-      flushSize = conf.getLong("hbase.hregion.memstore.flush.size",
-                      HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
-    }
-    this.memstoreFlushSize = flushSize;
-    this.blockingMemStoreSize = this.memstoreFlushSize *
-      conf.getLong("hbase.hregion.memstore.block.multiplier", 2);
+    try {
+      LOG.info("Loading table descriptor for region "
+        + this.regionInfo.getTableNameAsString() + " from HDFS");
+      loadHTableDescriptor(tableDir);
+      LOG.info("Table descriptor loaded from HDFS: " + this.htableDescriptor);
+    } catch (IOException ioe) {
+      LOG.error("Could not instantiate region: failed loading HTableDescriptor", ioe);
+    }
     // don't initialize coprocessors if not running within a regionserver
     // TODO: revisit if coprocessors should load in other cases
     if (rsServices != null) {
@@ -338,6 +339,40 @@ public class HRegion implements HeapSize
     }
   }
 
+  private void loadHTableDescriptor(Path tableDir) throws IOException {
+    LOG.debug("Assigning tabledesc from .tableinfo for region = "
+        + this.regionInfo.getRegionNameAsString());
+    // load HTableDescriptor
+    this.htableDescriptor = FSUtils.getTableDescriptor(tableDir, fs);
+
+    if (this.htableDescriptor != null) {
+      setHTableSpecificConf();
+    } else {
+      throw new IOException("Table description missing in " +
+          ".tableinfo. Cannot create new region."
+          + " current region is == " + this.regionInfo.toString());
+    }
+
+  }
+
+  private void setHTableSpecificConf() {
+    if (this.htableDescriptor != null) {
+      LOG.info("Setting up tabledescriptor config now ...");
+      long flushSize = this.htableDescriptor.getMemStoreFlushSize();
+      if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) {
+        flushSize = conf.getLong("hbase.hregion.memstore.flush.size",
+            HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
+      }
+      this.memstoreFlushSize = flushSize;
+      this.blockingMemStoreSize = this.memstoreFlushSize *
+          conf.getLong("hbase.hregion.memstore.block.multiplier", 2);
+    }
+  }
+
+  public void setHtableDescriptor(HTableDescriptor htableDescriptor) {
+    this.htableDescriptor = htableDescriptor;
+  }
+
   /**
    * Initialize this region.
    * @return What the next sequence (edit) id should be.
@@ -378,7 +413,7 @@ public class HRegion implements HeapSize
 
     // Load in all the HStores.  Get maximum seqid.
     long maxSeqId = -1;
-    for (HColumnDescriptor c : this.regionInfo.getTableDesc().getFamilies()) {
+    for (HColumnDescriptor c : this.htableDescriptor.getFamilies()) {
       status.setStatus("Instantiating store for column family " + c);
       Store store = instantiateHStore(this.tableDir, c);
       this.stores.put(c.getName(), store);
@@ -398,7 +433,7 @@ public class HRegion implements HeapSize
     SplitTransaction.cleanupAnySplitDetritus(this);
     FSUtils.deleteDirectory(this.fs, new Path(regiondir, MERGEDIR));
 
-    this.writestate.setReadOnly(this.regionInfo.getTableDesc().isReadOnly());
+    this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
 
     this.writestate.compacting = 0;
     this.lastFlushTime = EnvironmentEdgeManager.currentTimeMillis();
@@ -703,7 +738,7 @@ public class HRegion implements HeapSize
 
   /** @return HTableDescriptor for this region */
   public HTableDescriptor getTableDesc() {
-    return this.regionInfo.getTableDesc();
+    return this.htableDescriptor;
   }
 
   /** @return HLog in use for this region */
@@ -1156,7 +1191,7 @@ public class HRegion implements HeapSize
     //     log-sequence-ids can be safely ignored.
     if (wal != null) {
       wal.completeCacheFlush(this.regionInfo.getEncodedNameAsBytes(),
-        regionInfo.getTableDesc().getName(), completeSequenceId,
+        regionInfo.getTableName(), completeSequenceId,
         this.getRegionInfo().isMetaRegion());
     }
 
@@ -1268,7 +1303,7 @@ public class HRegion implements HeapSize
   void prepareScanner(Scan scan) throws IOException {
     if(!scan.hasFamilies()) {
       // Adding all families to scanner
-      for(byte[] family: regionInfo.getTableDesc().getFamiliesKeys()){
+      for(byte[] family: this.htableDescriptor.getFamiliesKeys()){
         scan.addFamily(family);
       }
     }
@@ -1303,7 +1338,7 @@ public class HRegion implements HeapSize
   private void prepareDelete(Delete delete) throws IOException {
     // Check to see if this is a deleteRow insert
     if(delete.getFamilyMap().isEmpty()){
-      for(byte [] family : regionInfo.getTableDesc().getFamiliesKeys()){
+      for(byte [] family : this.htableDescriptor.getFamiliesKeys()){
         // Don't eat the timestamp
         delete.deleteFamily(family, delete.getTimeStamp());
       }
@@ -1424,8 +1459,8 @@ public class HRegion implements HeapSize
         // single WALEdit.
         WALEdit walEdit = new WALEdit();
         addFamilyMapToWALEdit(familyMap, walEdit);
-        this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
-            walEdit, now);
+        this.log.append(regionInfo, this.htableDescriptor.getName(),
+            walEdit, now, this.htableDescriptor);
       }
 
       // Now make changes to the memstore.
@@ -1683,8 +1718,8 @@ public class HRegion implements HeapSize
       }
 
       // Append the edit to WAL
-      this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
-          walEdit, now);
+      this.log.append(regionInfo, this.htableDescriptor.getName(),
+          walEdit, now, this.htableDescriptor);
 
       // ------------------------------------
       // STEP 4. Write back to memstore
@@ -1937,8 +1972,8 @@ public class HRegion implements HeapSize
       if (writeToWAL) {
         WALEdit walEdit = new WALEdit();
         addFamilyMapToWALEdit(familyMap, walEdit);
-        this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
-           walEdit, now);
+        this.log.append(regionInfo, this.htableDescriptor.getName(),
+            walEdit, now, this.htableDescriptor);
       }
 
       long addedSize = applyFamilyMapToMemstore(familyMap);
@@ -2079,6 +2114,7 @@ public class HRegion implements HeapSize
       final long minSeqId, final CancelableProgressable reporter,
       final MonitoredTask status)
   throws UnsupportedEncodingException, IOException {
+    LOG.info("replayRecoveredEditsIfAny");
     long seqid = minSeqId;
     NavigableSet<Path> files = HLog.getSplitEditFilesSorted(this.fs, regiondir);
     if (files == null || files.isEmpty()) return seqid;
@@ -2128,6 +2164,7 @@ public class HRegion implements HeapSize
   private long replayRecoveredEdits(final Path edits,
       final long minSeqId, final CancelableProgressable reporter)
     throws IOException {
+    LOG.info("ReplayRecoveredEdits");
     String msg = "Replaying edits from " + edits + "; minSequenceid=" + minSeqId;
     LOG.info(msg);
     MonitoredTask status = TaskMonitor.get().createStatus(msg);
@@ -2182,11 +2219,15 @@ public class HRegion implements HeapSize
         // Start coprocessor replay here. The coprocessor is for each WALEdit
         // instead of a KeyValue.
         if (coprocessorHost != null) {
+          LOG.info("Running pre-WAL-restore hook in coprocessors");
+
           status.setStatus("Running pre-WAL-restore hook in coprocessors");
           if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
             // if bypass this log entry, ignore it ...
             continue;
           }
+        } else {
+          LOG.debug("No coprocessor host; skipping pre-WAL-restore hook");
         }
 
         if (firstSeqIdInLog == -1) {
@@ -2756,22 +2797,30 @@ public class HRegion implements HeapSize
    * @param info Info for region to create.
    * @param rootDir Root directory for HBase instance
    * @param conf
+   * @param hTableDescriptor
    * @return new HRegion
    *
    * @throws IOException
    */
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
-    final Configuration conf)
-  throws IOException {
+                                      final Configuration conf,
+                                      final HTableDescriptor hTableDescriptor)
+      throws IOException {
+    LOG.info("creating HRegion " + info.getTableNameAsString()
+    + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
+    " Table name == " + info.getTableNameAsString());
+
     Path tableDir =
-      HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName());
+        HTableDescriptor.getTableDir(rootDir, info.getTableName());
     Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
     FileSystem fs = FileSystem.get(conf);
+
     fs.mkdirs(regionDir);
+    FSUtils.createTableDescriptor(fs, hTableDescriptor, tableDir);
     HRegion region = HRegion.newHRegion(tableDir,
-      new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
-          new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf),
-      fs, conf, info, null);
+        new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
+            new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf),
+        fs, conf, info, null);
     region.initialize();
     return region;
   }
@@ -2802,7 +2851,6 @@ public class HRegion implements HeapSize
    * HRegion#getMinSequenceId() to ensure the log id is properly kept
    * up.  HRegionStore does this every time it opens a new region.
    * @param conf
-   * @param flusher An interface we can request flushes against.
    * @param reporter An interface we can report progress against.
    * @return new HRegion
    *
@@ -2819,12 +2867,52 @@ public class HRegion implements HeapSize
       throw new NullPointerException("Passed region info is null");
     }
     Path dir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf),
-      info.getTableDesc().getName());
+      info.getTableName());
     HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
       rsServices);
     return r.openHRegion(reporter);
   }
 
+  public static HRegion openHRegion(Path tableDir, final HRegionInfo info,
+                                    final HLog wal, final Configuration conf)
+      throws IOException {
+    return openHRegion(tableDir, info, wal, conf, null, null);
+  }
+
+  /**
+   * Open a Region.
+   * @param tableDir Table directory
+   * @param info Info for region to be opened.
+   * @param wal HLog for region to use. This method will call
+   * HLog#setSequenceNumber(long) passing the result of the call to
+   * HRegion#getMinSequenceId() to ensure the log id is properly kept
+   * up.  HRegionStore does this every time it opens a new region.
+   * @param conf
+   * @param reporter An interface we can report progress against.
+   * @return new HRegion
+   *
+   * @throws IOException
+   */
+  public static HRegion openHRegion(final Path tableDir, final HRegionInfo info,
+                                    final HLog wal, final Configuration conf,
+                                    final RegionServerServices rsServices,
+                                    final CancelableProgressable reporter)
+      throws IOException {
+    LOG.info("HRegion.openHRegion Region name ==" + info.getRegionNameAsString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Opening region: " + info);
+    }
+    if (info == null) {
+      throw new NullPointerException("Passed region info is null");
+    }
+    Path dir = HTableDescriptor.getTableDir(tableDir,
+        info.getTableName());
+    HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info,
+        rsServices);
+    return r.openHRegion(reporter);
+  }
+
+
   /**
    * Open HRegion.
    * Calls initialize and sets sequenceid.
@@ -2844,7 +2932,7 @@ public class HRegion implements HeapSize
   }
 
   private void checkCompressionCodecs() throws IOException {
-    for (HColumnDescriptor fam: regionInfo.getTableDesc().getColumnFamilies()) {
+    for (HColumnDescriptor fam: this.htableDescriptor.getColumnFamilies()) {
       CompressionTest.testCompression(fam.getCompression());
       CompressionTest.testCompression(fam.getCompactionCompression());
     }
@@ -2872,6 +2960,11 @@ public class HRegion implements HeapSize
           HConstants.REGIONINFO_QUALIFIER,
           EnvironmentEdgeManager.currentTimeMillis(),
           Writables.getBytes(r.getRegionInfo())));
+      edits.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
+          HConstants.META_MIGRATION_QUALIFIER,
+          EnvironmentEdgeManager.currentTimeMillis(),
+          Bytes.toBytes("true")));
+
       meta.put(HConstants.CATALOG_FAMILY, edits);
     } finally {
       meta.releaseRowLock(lid);
@@ -2910,7 +3003,7 @@ public class HRegion implements HeapSize
    */
   public static Path getRegionDir(final Path rootdir, final HRegionInfo info) {
     return new Path(
-      HTableDescriptor.getTableDir(rootdir, info.getTableDesc().getName()),
+      HTableDescriptor.getTableDir(rootdir, info.getTableName()),
                                    info.getEncodedName());
   }
 
@@ -2988,8 +3081,8 @@ public class HRegion implements HeapSize
    * @throws IOException
    */
   public static HRegion merge(HRegion a, HRegion b) throws IOException {
-    if (!a.getRegionInfo().getTableDesc().getNameAsString().equals(
-        b.getRegionInfo().getTableDesc().getNameAsString())) {
+    if (!a.getRegionInfo().getTableNameAsString().equals(
+        b.getRegionInfo().getTableNameAsString())) {
       throw new IOException("Regions do not belong to the same table");
     }
 
@@ -3042,7 +3135,8 @@ public class HRegion implements HeapSize
          ? b.getEndKey()
          : a.getEndKey());
 
-    HRegionInfo newRegionInfo = new HRegionInfo(tabledesc, startKey, endKey);
+    HRegionInfo newRegionInfo =
+        new HRegionInfo(tabledesc.getName(), startKey, endKey);
     LOG.info("Creating new region " + newRegionInfo.toString());
     String encodedName = newRegionInfo.getEncodedName();
     Path newRegionDir = HRegion.getRegionDir(a.getTableDir(), encodedName);
@@ -3181,7 +3275,7 @@ public class HRegion implements HeapSize
         checkFamily(family);
       }
     } else { // Adding all families to scanner
-      for (byte[] family: regionInfo.getTableDesc().getFamiliesKeys()) {
+      for (byte[] family: this.htableDescriptor.getFamiliesKeys()) {
         get.addFamily(family);
       }
     }
@@ -3387,8 +3481,8 @@ public class HRegion implements HeapSize
 
         // Actually write to WAL now
         if (writeToWAL) {
-          this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
-            walEdits, now);
+          this.log.append(regionInfo, this.htableDescriptor.getName(),
+              walEdits, now, this.htableDescriptor);
         }
 
         size = this.addAndGetGlobalMemstoreSize(size);
@@ -3458,8 +3552,8 @@ public class HRegion implements HeapSize
           long now = EnvironmentEdgeManager.currentTimeMillis();
           WALEdit walEdit = new WALEdit();
           walEdit.add(newKv);
-          this.log.append(regionInfo, regionInfo.getTableDesc().getName(),
-            walEdit, now);
+          this.log.append(regionInfo, this.htableDescriptor.getName(),
+              walEdit, now, this.htableDescriptor);
         }
 
         // Now request the ICV to the store, this will set the timestamp
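
For context on the HRegion changes above: HRegionInfo no longer carries the
HTableDescriptor, so callers of createHRegion() must pass the descriptor
explicitly and it is persisted to .tableinfo alongside the region. A minimal
sketch of creating a region against the patched API (the table and family
names are hypothetical, not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class CreateRegionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical table with a single column family.
        HTableDescriptor htd = new HTableDescriptor("t1");
        htd.addFamily(new HColumnDescriptor("f1"));
        // Region covering the whole key range, built with the new
        // HRegionInfo(byte [] tableName, ...) constructor.
        HRegionInfo hri = new HRegionInfo(htd.getName(),
            HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);
        Path rootDir = FSUtils.getRootDir(conf);
        // createHRegion() now also writes <rootdir>/t1/.tableinfo via
        // FSUtils.createTableDescriptor().
        HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd);
        System.out.println("Created " + region.getRegionInfo());
        region.close();
      }
    }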

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Wed Jun  8 00:03:54 2011
@@ -1348,6 +1348,7 @@ public class HRegionServer implements HR
   public void postOpenDeployTasks(final HRegion r, final CatalogTracker ct,
       final boolean daughter)
   throws KeeperException, IOException {
+    LOG.info("HRS.PostOpenDeployTasks");
     // Do checks to see if we need to compact (references or too many files)
     for (Store s : r.getStores().values()) {
       if (s.hasReferences() || s.needsCompaction()) {
@@ -1357,24 +1358,36 @@ public class HRegionServer implements HR
 
     // Add to online regions if all above was successful.
     addToOnlineRegions(r);
-
+    LOG.info("addToOnlineRegions is done" + r.getRegionInfo());
     // Update ZK, ROOT or META
     if (r.getRegionInfo().isRootRegion()) {
+
+      LOG.info("setRootLocation");
       RootLocationEditor.setRootLocation(getZooKeeper(),
        this.serverNameFromMasterPOV);
     } else if (r.getRegionInfo().isMetaRegion()) {
+      LOG.info("updateMetaLocation");
+
       MetaEditor.updateMetaLocation(ct, r.getRegionInfo(),
         this.serverNameFromMasterPOV);
     } else {
+      LOG.info("updateMetaLocation 111");
+
       if (daughter) {
+        LOG.info("updateMetaLocation 22");
+
         // If daughter of a split, update whole row, not just location.
         MetaEditor.addDaughter(ct, r.getRegionInfo(),
           this.serverNameFromMasterPOV);
       } else {
+        LOG.info("updateMetaLocation 33");
+
         MetaEditor.updateRegionLocation(ct, r.getRegionInfo(),
           this.serverNameFromMasterPOV);
       }
     }
+    LOG.info("END HRS.PostOpenDeployTasks");
+
   }
 
   /**

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java Wed Jun  8 00:03:54 2011
@@ -22,10 +22,7 @@ package org.apache.hadoop.hbase.regionse
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -166,6 +163,12 @@ class LogRoller extends Thread implement
   }
 
   @Override
+  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
+                                       WALEdit logEdit) {
+    //Not interested
+  }
+
+  @Override
   public void logCloseRequested() {
     // not interested
   }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java Wed Jun  8 00:03:54 2011
@@ -170,9 +170,9 @@ public class SplitTransaction {
       return false;
     }
     long rid = getDaughterRegionIdTimestamp(hri);
-    this.hri_a = new HRegionInfo(hri.getTableDesc(), startKey, this.splitrow,
+    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow,
       false, rid);
-    this.hri_b = new HRegionInfo(hri.getTableDesc(), this.splitrow, endKey,
+    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey,
       false, rid);
     return true;
   }

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java Wed Jun  8 00:03:54 2011
@@ -39,11 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -195,9 +191,15 @@ public class Store implements HeapSize {
 
     // Check if this is in-memory store
     this.inMemory = family.isInMemory();
+    long maxFileSize = 0L;
+    HTableDescriptor hTableDescriptor = region.getTableDesc();
+    if (hTableDescriptor != null) {
+      maxFileSize = hTableDescriptor.getMaxFileSize();
+    } else {
+      maxFileSize = HConstants.DEFAULT_MAX_FILE_SIZE;
+    }
 
     // By default we split region if a file > HConstants.DEFAULT_MAX_FILE_SIZE.
-    long maxFileSize = info.getTableDesc().getMaxFileSize();
     if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
       maxFileSize = conf.getLong("hbase.hregion.max.filesize",
         HConstants.DEFAULT_MAX_FILE_SIZE);

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java Wed Jun  8 00:03:54 2011
@@ -24,6 +24,7 @@ import java.util.concurrent.atomic.Atomi
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.executor.EventHandler;
@@ -71,9 +72,7 @@ public class OpenRegionHandler extends E
   public void process() throws IOException {
     try {
       final String name = regionInfo.getRegionNameAsString();
-      LOG.debug("Processing open of " + name);
       if (this.server.isStopped() || this.rsServices.isStopping()) {
-        LOG.info("Server stopping or stopped, skipping open of " + name);
         return;
       }
       final String encodedName = regionInfo.getEncodedName();
@@ -182,6 +181,7 @@ public class OpenRegionHandler extends E
         Thread.currentThread().interrupt();
       }
     }
+
     // Was there an exception opening the region?  This should trigger on
     // InterruptedException too.  If so, we failed.
     return !t.interrupted() && t.getException() == null;
@@ -264,6 +264,33 @@ public class OpenRegionHandler extends E
   /**
    * @return Instance of HRegion if successful open else null.
    */
+  HRegion openRegion(Path tableDir) {
+    HRegion region = null;
+    try {
+      // Instantiate the region.  This also periodically tickles our zk OPENING
+      // state so master doesn't timeout this region in transition.
+      region = HRegion.openHRegion(tableDir, this.regionInfo, this.rsServices.getWAL(),
+        this.server.getConfiguration(), this.rsServices,
+        new CancelableProgressable() {
+          public boolean progress() {
+            // We may lose the znode ownership during the open.  Currently its
+            // too hard interrupting ongoing region open.  Just let it complete
+            // and check we still have the znode after region open.
+            return tickleOpening("open_region_progress");
+          }
+        });
+    } catch (IOException e) {
+      // We failed open.  Let our znode expire in regions-in-transition and
+      // Master will assign elsewhere.  Presumes nothing to close.
+      LOG.error("Failed open of region=" +
+        this.regionInfo.getRegionNameAsString(), e);
+    }
+    return region;
+  }
+
+  /**
+   * @return Instance of HRegion if successful open else null.
+   */
   HRegion openRegion() {
     HRegion region = null;
     try {

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Wed Jun  8 00:03:54 2011
@@ -53,11 +53,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.Syncable;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -816,22 +812,6 @@ public class HLog implements Syncable {
     }
   }
 
-   /** Append an entry to the log.
-   *
-   * @param regionInfo
-   * @param logEdit
-   * @param now Time of this edit write.
-   * @throws IOException
-   */
-  public void append(HRegionInfo regionInfo, WALEdit logEdit,
-    final long now,
-    final boolean isMetaRegion)
-  throws IOException {
-    byte [] regionName = regionInfo.getEncodedNameAsBytes();
-    byte [] tableName = regionInfo.getTableDesc().getName();
-    this.append(regionInfo, makeKey(regionName, tableName, -1, now), logEdit);
-  }
-
   /**
    * @param now
    * @param regionName
@@ -851,7 +831,8 @@ public class HLog implements Syncable {
    * @param logKey
    * @throws IOException
    */
-  public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit)
+  public void append(HRegionInfo regionInfo, HLogKey logKey, WALEdit logEdit,
+                     HTableDescriptor htd)
   throws IOException {
     if (this.closed) {
       throw new IOException("Cannot append; log is closed");
@@ -866,14 +847,14 @@ public class HLog implements Syncable {
       // is greater than or equal to the value in lastSeqWritten.
       this.lastSeqWritten.putIfAbsent(regionInfo.getEncodedNameAsBytes(),
         Long.valueOf(seqNum));
-      doWrite(regionInfo, logKey, logEdit);
+      doWrite(regionInfo, logKey, logEdit, htd);
       this.numEntries.incrementAndGet();
     }
 
     // Sync if catalog region, and if not then check if that table supports
     // deferred log flushing
     if (regionInfo.isMetaRegion() ||
-        !regionInfo.getTableDesc().isDeferredLogFlush()) {
+        !htd.isDeferredLogFlush()) {
       // sync txn to file system
       this.sync();
     }
@@ -903,7 +884,7 @@ public class HLog implements Syncable {
    * @throws IOException
    */
   public void append(HRegionInfo info, byte [] tableName, WALEdit edits,
-    final long now)
+    final long now, HTableDescriptor htd)
   throws IOException {
     if (edits.isEmpty()) return;
     if (this.closed) {
@@ -921,13 +902,13 @@ public class HLog implements Syncable {
       byte [] hriKey = info.getEncodedNameAsBytes();
       this.lastSeqWritten.putIfAbsent(hriKey, seqNum);
       HLogKey logKey = makeKey(hriKey, tableName, seqNum, now);
-      doWrite(info, logKey, edits);
+      doWrite(info, logKey, edits, htd);
       this.numEntries.incrementAndGet();
     }
     // Sync if catalog region, and if not then check if that table supports
     // deferred log flushing
     if (info.isMetaRegion() ||
-        !info.getTableDesc().isDeferredLogFlush()) {
+        !htd.isDeferredLogFlush()) {
       // sync txn to file system
       this.sync();
     }
@@ -1054,14 +1035,15 @@ public class HLog implements Syncable {
     }
   }
 
-  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit)
+  protected void doWrite(HRegionInfo info, HLogKey logKey, WALEdit logEdit,
+                           HTableDescriptor htd)
   throws IOException {
     if (!this.enabled) {
       return;
     }
     if (!this.listeners.isEmpty()) {
       for (WALObserver i: this.listeners) {
-        i.visitLogEntryBeforeWrite(info, logKey, logEdit);
+        i.visitLogEntryBeforeWrite(htd, logKey, logEdit);
       }
     }
     try {
@@ -1077,12 +1059,12 @@ public class HLog implements Syncable {
       writeOps++;
       if (took > 1000) {
         long len = 0;
-        for(KeyValue kv : logEdit.getKeyValues()) { 
-          len += kv.getLength(); 
+        for(KeyValue kv : logEdit.getKeyValues()) {
+          len += kv.getLength();
         }
         LOG.warn(String.format(
           "%s took %d ms appending an edit to hlog; editcount=%d, len~=%s",
-          Thread.currentThread().getName(), took, this.numEntries.get(), 
+          Thread.currentThread().getName(), took, this.numEntries.get(),
           StringUtils.humanReadableInt(len)));
       }
     } catch (IOException e) {
@@ -1092,6 +1074,7 @@ public class HLog implements Syncable {
     }
   }
 
+
   /** @return How many items have been added to the log */
   int getNumEntries() {
     return numEntries.get();
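
With the old append(HRegionInfo, WALEdit, long, boolean) overload removed, the
WAL append path now threads the HTableDescriptor down to doWrite() and the
listeners, and consults it (rather than HRegionInfo.getTableDesc()) for the
deferred-log-flush decision. A minimal sketch of the new call shape, assuming
an already-open HLog and a descriptor loaded from .tableinfo; the row, family
and qualifier below are illustrative only:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

    public class WalAppendSketch {
      static void appendOne(HLog log, HRegionInfo regionInfo,
          HTableDescriptor htd) throws IOException {
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("f1"),
            Bytes.toBytes("q"), Bytes.toBytes("v")));
        long now = EnvironmentEdgeManager.currentTimeMillis();
        // The descriptor is handed to doWrite() and the WAL listeners;
        // htd.isDeferredLogFlush() now decides whether sync() is deferred.
        log.append(regionInfo, htd.getName(), edit, now, htd);
      }
    }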

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALObserver.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALObserver.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALObserver.java Wed Jun  8 00:03:54 2011
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionse
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 
 /**
  * Get notification of {@link HLog}/WAL log events. The invocations are inline
@@ -51,4 +52,14 @@ public interface WALObserver {
   */
  public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
    WALEdit logEdit);
+
+  /**
+   * Called just before a WAL edit is written.
+   * @param htd descriptor of the table the edit belongs to
+   * @param logKey
+   * @param logEdit
+   */
+ public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
+   WALEdit logEdit);
+
 }
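
Implementations of WALObserver must now provide both visitLogEntryBeforeWrite
overloads; after this change only the HTableDescriptor variant is invoked from
HLog.doWrite(). A minimal sketch of an observer (the class is hypothetical and
left abstract because WALObserver also declares log-roll callbacks that this
diff does not show):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
    import org.apache.hadoop.hbase.regionserver.wal.WALObserver;

    public abstract class ExampleWALObserver implements WALObserver {
      private static final Log LOG =
          LogFactory.getLog(ExampleWALObserver.class);

      @Override
      public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
          WALEdit logEdit) {
        // Legacy overload; no longer called from HLog.doWrite().
      }

      @Override
      public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
          WALEdit logEdit) {
        // The table descriptor now arrives directly instead of via
        // HRegionInfo.getTableDesc().
        LOG.info("About to append " + logEdit.getKeyValues().size()
            + " edits for table " + htd.getNameAsString());
      }
    }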

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java Wed Jun  8 00:03:54 2011
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -132,12 +133,18 @@ public class Replication implements WALO
   @Override
   public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
       WALEdit logEdit) {
+    // Not interested
+  }
+
+  @Override
+  public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
+                                       WALEdit logEdit) {
     NavigableMap<byte[], Integer> scopes =
         new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
     byte[] family;
     for (KeyValue kv : logEdit.getKeyValues()) {
       family = kv.getFamily();
-      int scope = info.getTableDesc().getFamily(family).getScope();
+      int scope = htd.getFamily(family).getScope();
       if (scope != REPLICATION_SCOPE_LOCAL &&
           !scopes.containsKey(family)) {
         scopes.put(family, scope);

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Wed Jun  8 00:03:54 2011
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -830,4 +831,209 @@ public class FSUtils {
     }
     LOG.info("Finished lease recover attempt for " + p);
   }
+
+
+  public static Map<String, HTableDescriptor> getTableDescriptors(
+    final Configuration config)
+  throws IOException {
+    Path path = getRootDir(config);
+    // since HMaster.getFileSystem() is package private
+    FileSystem fs = path.getFileSystem(config);
+    return getTableDescriptors(fs, path);
+  }
+
+  public static Map<String, HTableDescriptor> getTableDescriptors(
+    final FileSystem fs, final Path hbaseRootDir)
+  throws IOException {
+    Map<String, HTableDescriptor> desc =
+        new HashMap<String, HTableDescriptor>();
+    DirFilter df = new DirFilter(fs);
+    // presumes any directory under hbase.rootdir is a table
+    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
+    for (FileStatus tableDir : tableDirs) {
+      Path d = tableDir.getPath();
+      String tableName = d.getName();
+
+      if (tableName.equals(HConstants.HREGION_LOGDIR_NAME)
+          || tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME))
+          || tableName.equals(Bytes.toString(HConstants.META_TABLE_NAME))
+          || tableName.equals(HConstants.HREGION_OLDLOGDIR_NAME)
+          ) {
+        continue;
+      }
+      LOG.info("Adding tabledescriptor for table = " + tableName);
+      HTableDescriptor htd = readTableDescriptor(fs, hbaseRootDir,
+          tableName);
+      if (htd != null) {
+        if (!desc.containsKey(tableName)) {
+          desc.put(tableName, htd);
+        }
+      }
+    }
+    return desc;
+  }
+
+  private static Path getTableInfoPath(Path hbaseRootDir, String tableName) {
+    Path tablePath = new Path(hbaseRootDir, tableName);
+    return new Path(tablePath, HConstants.TABLEINFO_NAME);
+  }
+
+  /**
+   * Get table info path for a table.
+   * @param tableName
+   * @return Table info path
+   */
+  private static Path getTableInfoPath(byte[] tableName, Configuration conf) throws IOException {
+    Path tablePath = new Path(getRootDir(conf), Bytes.toString(tableName));
+    Path tableInfoPath = new Path(tablePath, HConstants.TABLEINFO_NAME);
+    return tableInfoPath;
+  }
+
+  private static Path getTablePath(byte[] tableName, Configuration conf) throws IOException {
+    return new Path(getRootDir(conf), Bytes.toString(tableName));
+  }
+
+  private static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
+    return getRootDir(conf).getFileSystem(conf);
+  }
+
+  /**
+   * Get HTableDescriptor
+   * @param config
+   * @param tableName
+   * @return HTableDescriptor for table
+   * @throws IOException
+   */
+  public static HTableDescriptor getHTableDescriptor(Configuration config,
+                                              String tableName)
+      throws IOException {
+    Path path = getRootDir(config);
+    FileSystem fs = path.getFileSystem(config);
+    return readTableDescriptor(fs, path, tableName);
+  }
+
+  private static HTableDescriptor readTableDescriptor(FileSystem fs,
+                                              Path hbaseRootDir,
+                                              String tableName) {
+    try {
+      FSDataInputStream fsDataInputStream =
+          fs.open(getTableInfoPath(hbaseRootDir, tableName));
+      HTableDescriptor hTableDescriptor = new HTableDescriptor();
+      hTableDescriptor.readFields(fsDataInputStream);
+      fsDataInputStream.close();
+      return hTableDescriptor;
+    } catch (IOException ioe) {
+      LOG.info("Exception during readTableDecriptor. Current table name = " + tableName , ioe);
+    }
+    return null;
+  }
+
+  public static HTableDescriptor getTableDescriptor(Path tableDir, FileSystem fs) {
+    try {
+      LOG.info("Reading table descriptor from .tableinfo. current path = "
+          + tableDir);
+      if (tableDir == null) {
+        LOG.info("Reading table descriptor from .tableinfo current tablename is NULL ");
+        return null;
+      }
+
+      FSDataInputStream fsDataInputStream =
+          fs.open(new Path(tableDir, HConstants.TABLEINFO_NAME));
+      HTableDescriptor hTableDescriptor = new HTableDescriptor();
+      hTableDescriptor.readFields(fsDataInputStream);
+      LOG.info("Current tabledescriptor from .tableinfo is " + hTableDescriptor.toString());
+      fsDataInputStream.close();
+      return hTableDescriptor;
+    } catch (IOException ioe) {
+      LOG.info("Exception during getTableDescriptor ", ioe);
+    }
+    return null;
+  }
+
+  /**
+   * Create new HTableDescriptor in HDFS.
+   * @param htableDescriptor
+   */
+  public static void createTableDescriptor(HTableDescriptor htableDescriptor,
+                                           Configuration conf) {
+    try {
+      Path tableDir = getTablePath(htableDescriptor.getName(), conf);
+      FileSystem fs = getCurrentFileSystem(conf);
+      createTableDescriptor(fs, htableDescriptor, tableDir);
+    } catch(IOException ioe) {
+      LOG.info("IOException while trying to create tableInfo in HDFS", ioe);
+    }
+  }
+
+  public static void createTableDescriptor(FileSystem fs,
+                                           HTableDescriptor htableDescriptor,
+                                           Path tableDir) {
+    try {
+      Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
+      LOG.info("Current tableInfoPath = " + tableInfoPath
+          + " tableDir = " + tableDir) ;
+      if (fs.exists(tableInfoPath) &&
+          fs.getFileStatus(tableInfoPath).getLen() > 0) {
+        LOG.info("TableInfo already exists.. Skipping creation");
+        return;
+      }
+      writeTableDescriptor(fs, htableDescriptor, tableDir);
+    } catch(IOException ioe) {
+      LOG.info("IOException while trying to create tableInfo in HDFS", ioe);
+    }
+  }
+
+  private static void writeTableDescriptor(FileSystem fs,
+                                           HTableDescriptor hTableDescriptor,
+                                           Path tableDir) throws IOException {
+    // Create in tmpdir and then move into place in case we crash after
+    // create but before close.  If we don't successfully close the file,
+    // subsequent region reopens will fail the below because create is
+    // registered in NN.
+    Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME);
+    Path tmpPath = new Path(new Path(tableDir,".tmp"),
+        HConstants.TABLEINFO_NAME);
+    LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath);
+    FSDataOutputStream out = fs.create(tmpPath, true);
+    try {
+      hTableDescriptor.write(out);
+      out.write('\n');
+      out.write('\n');
+      out.write(Bytes.toBytes(hTableDescriptor.toString()));
+    } finally {
+      out.close();
+    }
+    if (!fs.rename(tmpPath, tableInfoPath)) {
+      throw new IOException("Unable to rename " + tmpPath + " to " +
+        tableInfoPath);
+    } else {
+      LOG.info("TableDescriptor stored. TableInfoPath = " + tableInfoPath);
+    }
+  }
+
+
+  public static void updateHTableDescriptor(FileSystem fs,
+                                            Configuration conf,
+                                            HTableDescriptor hTableDescriptor) throws IOException
+  {
+    Path tableInfoPath = getTableInfoPath(hTableDescriptor.getName(), conf);
+    FSDataOutputStream out = fs.create(tableInfoPath, true);
+    try {
+      hTableDescriptor.write(out);
+      out.write('\n');
+      out.write('\n');
+      out.write(Bytes.toBytes(hTableDescriptor.toString()));
+      LOG.info("updateHTableDescriptor. Updated tableinfo in HDFS under "
+        + tableInfoPath + " For HTD => "
+        + hTableDescriptor.toString());
+    } finally {
+      out.close();
+    }
+  }
+
+  private static Path getTmpDir(HTableDescriptor htableDescriptor, Configuration configuration)
+      throws IOException {
+    return new Path(getTablePath(htableDescriptor.getName(), configuration), ".tmp");
+  }
+
 }
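
The new FSUtils helpers persist the HTableDescriptor as a Writable under
<tabledir>/.tableinfo, writing to a .tmp file first and renaming it into place
so a crash mid-write cannot leave a half-created descriptor behind. A minimal
round-trip sketch using only the helpers added above; the table name is
hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class TableInfoRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTableDescriptor htd = new HTableDescriptor("t1");
        htd.addFamily(new HColumnDescriptor("f1"));
        // Writes <rootdir>/t1/.tableinfo unless a non-empty one exists.
        FSUtils.createTableDescriptor(htd, conf);
        // Read the descriptor back from the table directory.
        Path tableDir = new Path(FSUtils.getRootDir(conf), "t1");
        FileSystem fs = tableDir.getFileSystem(conf);
        HTableDescriptor roundTripped = FSUtils.getTableDescriptor(tableDir, fs);
        System.out.println("Read back: " + roundTripped);
      }
    }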

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Wed Jun  8 00:03:54 2011
@@ -156,19 +156,24 @@ public class HBaseFsck {
     // get a list of all tables that have not changed recently.
     AtomicInteger numSkipped = new AtomicInteger(0);
     HTableDescriptor[] allTables = getTables(numSkipped);
-    errors.print("Number of Tables: " + allTables.length);
+    errors.print("Number of Tables: " +
+        (allTables == null ? 0 : allTables.length));
     if (details) {
       if (numSkipped.get() > 0) {
         errors.detail("Number of Tables in flux: " + numSkipped.get());
       }
-      for (HTableDescriptor td : allTables) {
+      if (allTables != null && allTables.length > 0) {
+        for (HTableDescriptor td : allTables) {
         String tableName = td.getNameAsString();
         errors.detail("  Table: " + tableName + "\t" +
                            (td.isReadOnly() ? "ro" : "rw") + "\t" +
                            (td.isRootRegion() ? "ROOT" :
                             (td.isMetaRegion() ? "META" : "    ")) + "\t" +
                            " families: " + td.getFamilies().size());
+        }
+
       }
+
     }
 
     // From the master, get a list of all known live region servers
@@ -255,7 +260,7 @@ public class HBaseFsck {
    * @throws KeeperException
    */
   private boolean isTableDisabled(HRegionInfo regionInfo) {
-    return disabledTables.contains(regionInfo.getTableDesc().getName());
+    return disabledTables.contains(regionInfo.getTableName());
   }
 
   /**
@@ -521,7 +526,7 @@ public class HBaseFsck {
       if (hbi.deployedOn.size() == 0) continue;
 
       // We should be safe here
-      String tableName = hbi.metaEntry.getTableDesc().getNameAsString();
+      String tableName = hbi.metaEntry.getTableNameAsString();
       TInfo modTInfo = tablesInfo.get(tableName);
       if (modTInfo == null) {
         modTInfo = new TInfo(tableName);
@@ -652,8 +657,8 @@ public class HBaseFsck {
    * @return tables that have not been modified recently
    * @throws IOException if an error is encountered
    */
-  HTableDescriptor[] getTables(AtomicInteger numSkipped) {
-    TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
+  HTableDescriptor[] getTables(AtomicInteger numSkipped) {
+    List<String> tableNames = new ArrayList<String>();
     long now = System.currentTimeMillis();
 
     for (HbckInfo hbi : regionInfo.values()) {
@@ -663,15 +668,27 @@ public class HBaseFsck {
       // pick only those tables that were not modified in the last few milliseconds.
       if (info != null && info.getStartKey().length == 0 && !info.isMetaRegion()) {
         if (info.modTime + timelag < now) {
-          uniqueTables.add(info.getTableDesc());
+          tableNames.add(info.getTableNameAsString());
         } else {
           numSkipped.incrementAndGet(); // one more in-flux table
         }
       }
     }
-    return uniqueTables.toArray(new HTableDescriptor[uniqueTables.size()]);
+    return getHTableDescriptors(tableNames);
   }
 
+  HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
+    HTableDescriptor[] htd = null;
+    try {
+      LOG.info("Fetching table descriptors for " + tableNames);
+      htd = new HBaseAdmin(conf).getTableDescriptors(tableNames);
+    } catch (IOException e) {
+      LOG.debug("Exception getting table descriptors", e);
+    }
+    return htd;
+  }
+
+
   /**
   * Gets the entry in regionInfo corresponding to the given encoded
    * region name. If the region has not been seen yet, a new entry is added
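
hbck no longer pulls descriptors out of HRegionInfo; it collects bare table
names from the region entries and asks the cluster for the descriptors. A
minimal sketch of that lookup, assuming the
HBaseAdmin#getTableDescriptors(List<String>) API referenced above; the table
names are hypothetical:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class FetchDescriptorsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        List<String> names = Arrays.asList("t1", "t2");
        // Descriptors now come from the cluster, not from region info.
        HTableDescriptor[] htds =
            new HBaseAdmin(conf).getTableDescriptors(names);
        for (HTableDescriptor htd : htds) {
          System.out.println(htd.getNameAsString());
        }
      }
    }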

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/HMerge.java Wed Jun  8 00:03:54 2011
@@ -261,7 +261,7 @@ class HMerge {
               Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
         }
         HRegionInfo region = Writables.getHRegionInfo(regionInfoValue);
-        if (!Bytes.equals(region.getTableDesc().getName(), this.tableName)) {
+        if (!Bytes.equals(region.getTableName(), this.tableName)) {
           return null;
         }
         return region;

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java Wed Jun  8 00:03:54 2011
@@ -322,78 +322,6 @@ public class MetaUtils {
   }
 
   /**
-   * Offline version of the online TableOperation,
-   * org.apache.hadoop.hbase.master.AddColumn.
-   * @param tableName table name
-   * @param hcd Add this column to <code>tableName</code>
-   * @throws IOException e
-   */
-  public void addColumn(final byte [] tableName,
-      final HColumnDescriptor hcd)
-  throws IOException {
-    List<HRegionInfo> metas = getMETARows(tableName);
-    for (HRegionInfo hri: metas) {
-      final HRegion m = getMetaRegion(hri);
-      scanMetaRegion(m, new ScannerListener() {
-        private boolean inTable = true;
-
-        @SuppressWarnings("synthetic-access")
-        public boolean processRow(HRegionInfo info) throws IOException {
-          LOG.debug("Testing " + Bytes.toString(tableName) + " against " +
-            Bytes.toString(info.getTableDesc().getName()));
-          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
-            this.inTable = false;
-            info.getTableDesc().addFamily(hcd);
-            updateMETARegionInfo(m, info);
-            return true;
-          }
-          // If we got here and we have not yet encountered the table yet,
-          // inTable will be false.  Otherwise, we've passed out the table.
-          // Stop the scanner.
-          return this.inTable;
-        }});
-    }
-  }
-
-  /**
-   * Offline version of the online TableOperation,
-   * org.apache.hadoop.hbase.master.DeleteColumn.
-   * @param tableName table name
-   * @param columnFamily Name of column name to remove.
-   * @throws IOException e
-   */
-  public void deleteColumn(final byte [] tableName,
-      final byte [] columnFamily) throws IOException {
-    List<HRegionInfo> metas = getMETARows(tableName);
-    for (HRegionInfo hri: metas) {
-      final HRegion m = getMetaRegion(hri);
-      scanMetaRegion(m, new ScannerListener() {
-        private boolean inTable = true;
-
-        @SuppressWarnings("synthetic-access")
-        public boolean processRow(HRegionInfo info) throws IOException {
-          if (Bytes.equals(info.getTableDesc().getName(), tableName)) {
-            this.inTable = false;
-            info.getTableDesc().removeFamily(columnFamily);
-            updateMETARegionInfo(m, info);
-            Path tabledir = new Path(rootdir,
-              info.getTableDesc().getNameAsString());
-            Path p = Store.getStoreHomedir(tabledir, info.getEncodedName(),
-              columnFamily);
-            if (!fs.delete(p, true)) {
-              LOG.warn("Failed delete of " + p);
-            }
-            return false;
-          }
-          // If we got here and we have not yet encountered the table yet,
-          // inTable will be false.  Otherwise, we've passed out the table.
-          // Stop the scanner.
-          return this.inTable;
-        }});
-    }
-  }
-
-  /**
    * Update COL_REGIONINFO in meta region r with HRegionInfo hri
    *
    * @param r region
@@ -466,7 +394,7 @@ public class MetaUtils {
 
       public boolean processRow(HRegionInfo info) throws IOException {
         SL_LOG.debug("Testing " + info);
-        if (Bytes.equals(info.getTableDesc().getName(),
+        if (Bytes.equals(info.getTableName(),
             HConstants.META_TABLE_NAME)) {
           result.add(info);
           return false;

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java Wed Jun  8 00:03:54 2011
@@ -560,8 +560,9 @@ public class RegionSplitter {
           if (sk.length == 0)
             sk = splitAlgo.firstRow();
           String startKey = splitAlgo.rowToStr(sk);
+          HTableDescriptor htd = table.getTableDescriptor();
           // check every Column Family for that region
-          for (HColumnDescriptor c : hri.getTableDesc().getFamilies()) {
+          for (HColumnDescriptor c : htd.getFamilies()) {
             Path cfDir = Store.getStoreHomedir(tableDir, hri.getEncodedName(),
                 c.getName());
             if (fs.exists(cfDir)) {

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Writables.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Writables.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Writables.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/util/Writables.java Wed Jun  8 00:03:54 2011
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.util;
 
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.Writable;
 
@@ -62,7 +63,7 @@ public class Writables {
 
   /**
    * Put a bunch of Writables as bytes all into the one byte array.
-   * @param w writable
+   * @param ws writable
    * @return The bytes of <code>w</code> gotten by running its
    * {@link Writable#write(java.io.DataOutput)} method.
    * @throws IOException e
@@ -215,4 +216,16 @@ public class Writables {
     }
     return tgt;
   }
+
+  /**
+   * Get an HRegionInfo090x deserialized from the given bytes.
+   * @param bytes serialized bytes
+   * @return HRegionInfo090x
+   * @throws IOException
+   */
+  public static HRegionInfo090x getHRegionInfoForMigration(final byte [] bytes)
+  throws IOException {
+    return (HRegionInfo090x)getWritable(bytes, new HRegionInfo090x());
+  }
+
 }
\ No newline at end of file

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java Wed Jun  8 00:03:54 2011
@@ -57,9 +57,15 @@ public abstract class HBaseTestCase exte
   /** configuration parameter name for test directory */
   public static final String TEST_DIRECTORY_KEY = "test.build.data";
 
+/*
   protected final static byte [] fam1 = Bytes.toBytes("colfamily1");
   protected final static byte [] fam2 = Bytes.toBytes("colfamily2");
   protected final static byte [] fam3 = Bytes.toBytes("colfamily3");
+*/
+  protected final static byte [] fam1 = Bytes.toBytes("colfamily11");
+  protected final static byte [] fam2 = Bytes.toBytes("colfamily21");
+  protected final static byte [] fam3 = Bytes.toBytes("colfamily31");
+
   protected static final byte [][] COLUMNS = {fam1, fam2, fam3};
 
   private boolean localfs = false;
@@ -159,9 +165,8 @@ public abstract class HBaseTestCase exte
     Path rootdir = filesystem.makeQualified(
         new Path(conf.get(HConstants.HBASE_DIR)));
     filesystem.mkdirs(rootdir);
-
-    return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey),
-        rootdir, conf);
+    HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
+    return HRegion.createHRegion(hri, rootdir, conf, desc);
   }
 
   protected HRegion openClosedRegion(final HRegion closedRegion)
@@ -653,9 +658,10 @@ public abstract class HBaseTestCase exte
   }
 
   protected void createRootAndMetaRegions() throws IOException {
-    root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf);
+    root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir,
+        conf, HTableDescriptor.ROOT_TABLEDESC);
     meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-        conf);
+        conf, HTableDescriptor.META_TABLEDESC);
     HRegion.addRegionToMETA(root, meta);
   }
 

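HRegion.createHRegion now takes the HTableDescriptor explicitly, since the HRegionInfo no longer embeds one. A minimal sketch of the new call sequence (rootdir and conf assumed set up as in the test base class):

    HTableDescriptor desc = new HTableDescriptor("testtable");
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes("colfamily11")));
    HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
    HRegion region = HRegion.createHRegion(hri, rootdir, conf, desc);
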
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java Wed Jun  8 00:03:54 2011
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.client.Re
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.migration.HRegionInfo090x;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -830,7 +831,7 @@ public class HBaseTestingUtility {
     int count = 0;
     for (int i = 0; i < startKeys.length; i++) {
       int j = (i + 1) % startKeys.length;
-      HRegionInfo hri = new HRegionInfo(table.getTableDescriptor(),
+      HRegionInfo hri = new HRegionInfo(table.getTableName(),
         startKeys[i], startKeys[j]);
       Put put = new Put(hri.getRegionName());
       put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
@@ -858,6 +859,65 @@ public class HBaseTestingUtility {
     return count;
   }
 
+  public int createMultiRegionsWithLegacyHRI(final Configuration c,
+                                             final HTableDescriptor htd,
+      final byte[] columnFamily, byte [][] startKeys)
+  throws IOException {
+    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
+    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
+    if(!htd.hasFamily(columnFamily)) {
+      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
+      htd.addFamily(hcd);
+    }
+    List<HRegionInfo090x> newRegions
+        = new ArrayList<HRegionInfo090x>(startKeys.length);
+    int count = 0;
+    for (int i = 0; i < startKeys.length; i++) {
+      int j = (i + 1) % startKeys.length;
+      HRegionInfo090x hri = new HRegionInfo090x(htd,
+        startKeys[i], startKeys[j]);
+      Put put = new Put(hri.getRegionName());
+      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        Writables.getBytes(hri));
+      meta.put(put);
+      LOG.info("createMultiRegions: PUT inserted " + hri.toString());
+
+      newRegions.add(hri);
+      count++;
+    }
+    return count;
+  }
+
+  public int createMultiRegionsWithNewHRI(final Configuration c, final HTableDescriptor htd,
+      final byte[] columnFamily, byte [][] startKeys)
+  throws IOException {
+    Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
+    HTable meta = new HTable(c, HConstants.META_TABLE_NAME);
+    if(!htd.hasFamily(columnFamily)) {
+      HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
+      htd.addFamily(hcd);
+    }
+    List<HRegionInfo> newRegions
+        = new ArrayList<HRegionInfo>(startKeys.length);
+    int count = 0;
+    for (int i = 0; i < startKeys.length; i++) {
+      int j = (i + 1) % startKeys.length;
+      HRegionInfo hri = new HRegionInfo(htd.getName(),
+        startKeys[i], startKeys[j]);
+      Put put = new Put(hri.getRegionName());
+      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+        Writables.getBytes(hri));
+      meta.put(put);
+      LOG.info("createMultiRegions: PUT inserted " + hri.toString());
+
+      newRegions.add(hri);
+      count++;
+    }
+    return count;
+  }
+
   /**
    * Create rows in META for regions of the specified table with the specified
    * start keys.  The first startKey should be a 0 length byte array if you
@@ -878,7 +938,8 @@ public class HBaseTestingUtility {
     int count = 0;
     for (int i = 0; i < startKeys.length; i++) {
       int j = (i + 1) % startKeys.length;
-      HRegionInfo hri = new HRegionInfo(htd, startKeys[i], startKeys[j]);
+      HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
+          startKeys[j]);
       Put put = new Put(hri.getRegionName());
       put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
         Writables.getBytes(hri));
@@ -922,8 +983,7 @@ public class HBaseTestingUtility {
     for (Result result : s) {
       HRegionInfo info = Writables.getHRegionInfo(
           result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
-      HTableDescriptor desc = info.getTableDesc();
-      if (Bytes.compareTo(desc.getName(), tableName) == 0) {
+      if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
         LOG.info("getMetaTableRows: row -> " +
             Bytes.toStringBinary(result.getRow()));
         rows.add(result.getRow());

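The legacy and new helpers above differ only in how the HRegionInfo is built; the .META. put is identical. The constructor migration side by side (a sketch):

    // 0.90.x: the full table descriptor rides inside the region info.
    HRegionInfo090x legacy = new HRegionInfo090x(htd, startKey, endKey);
    // 0.92+: only the table name is stored; the descriptor lives elsewhere.
    HRegionInfo current = new HRegionInfo(htd.getName(), startKey, endKey);
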
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestCompare.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestCompare.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestCompare.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestCompare.java Wed Jun  8 00:03:54 2011
@@ -31,25 +31,25 @@ public class TestCompare extends TestCas
    * Sort of HRegionInfo.
    */
   public void testHRegionInfo() {
-    HRegionInfo a = new HRegionInfo(new HTableDescriptor("a"), null, null);
-    HRegionInfo b = new HRegionInfo(new HTableDescriptor("b"), null, null);
+    HRegionInfo a = new HRegionInfo(Bytes.toBytes("a"), null, null);
+    HRegionInfo b = new HRegionInfo(Bytes.toBytes("b"), null, null);
     assertTrue(a.compareTo(b) != 0);
     HTableDescriptor t = new HTableDescriptor("t");
     byte [] midway = Bytes.toBytes("midway");
-    a = new HRegionInfo(t, null, midway);
-    b = new HRegionInfo(t, midway, null);
+    a = new HRegionInfo(t.getName(), null, midway);
+    b = new HRegionInfo(t.getName(), midway, null);
     assertTrue(a.compareTo(b) < 0);
     assertTrue(b.compareTo(a) > 0);
     assertEquals(a, a);
     assertTrue(a.compareTo(a) == 0);
-    a = new HRegionInfo(t, Bytes.toBytes("a"), Bytes.toBytes("d"));
-    b = new HRegionInfo(t, Bytes.toBytes("e"), Bytes.toBytes("g"));
+    a = new HRegionInfo(t.getName(), Bytes.toBytes("a"), Bytes.toBytes("d"));
+    b = new HRegionInfo(t.getName(), Bytes.toBytes("e"), Bytes.toBytes("g"));
     assertTrue(a.compareTo(b) < 0);
-    a = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
-    b = new HRegionInfo(t, Bytes.toBytes("e"), Bytes.toBytes("g"));
+    a = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
+    b = new HRegionInfo(t.getName(), Bytes.toBytes("e"), Bytes.toBytes("g"));
     assertTrue(a.compareTo(b) < 0);
-    a = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
-    b = new HRegionInfo(t, Bytes.toBytes("aaaa"), Bytes.toBytes("eeee"));
+    a = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("dddd"));
+    b = new HRegionInfo(t.getName(), Bytes.toBytes("aaaa"), Bytes.toBytes("eeee"));
     assertTrue(a.compareTo(b) < 0);
   }
 }
\ No newline at end of file

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestScanMultipleVersions.java Wed Jun  8 00:03:54 2011
@@ -57,10 +57,10 @@ public class TestScanMultipleVersions ex
     this.desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
 
     // Region 0 will contain the key range [,row_0500)
-    INFOS[0] = new HRegionInfo(this.desc, HConstants.EMPTY_START_ROW,
+    INFOS[0] = new HRegionInfo(desc.getName(), HConstants.EMPTY_START_ROW,
         Bytes.toBytes("row_0500"));
     // Region 1 will contain the key range [row_0500,)
-    INFOS[1] = new HRegionInfo(this.desc, Bytes.toBytes("row_0500"),
+    INFOS[1] = new HRegionInfo(desc.getName(), Bytes.toBytes("row_0500"),
         HConstants.EMPTY_END_ROW);
 
     // Create root and meta regions
@@ -68,7 +68,8 @@ public class TestScanMultipleVersions ex
     // Create the regions
     for (int i = 0; i < REGIONS.length; i++) {
       REGIONS[i] =
-        HRegion.createHRegion(this.INFOS[i], this.testDir, this.conf);
+        HRegion.createHRegion(this.INFOS[i], this.testDir, this.conf,
+            this.desc);
       // Insert data
       for (int j = 0; j < TIMESTAMPS.length; j++) {
         Put put = new Put(ROWS[i], TIMESTAMPS[j], null);

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestSerialization.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestSerialization.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestSerialization.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TestSerialization.java Wed Jun  8 00:03:54 2011
@@ -103,8 +103,8 @@ public class TestSerialization {
     HRegionInfo deserializedHri =
       (HRegionInfo)Writables.getWritable(hrib, new HRegionInfo());
     assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
-    assertEquals(hri.getTableDesc().getFamilies().size(),
-      deserializedHri.getTableDesc().getFamilies().size());
+    //assertEquals(hri.getTableDesc().getFamilies().size(),
+    //  deserializedHri.getTableDesc().getFamilies().size());
   }
 
   @Test public void testRegionInfos() throws Exception {
@@ -126,7 +126,7 @@ public class TestSerialization {
     for (int i = 0; i < families.length; i++) {
       htd.addFamily(new HColumnDescriptor(families[i]));
     }
-    return new HRegionInfo(htd, HConstants.EMPTY_START_ROW,
+    return new HRegionInfo(htd.getName(), HConstants.EMPTY_START_ROW,
       HConstants.EMPTY_END_ROW);
   }
 

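A round-tripped HRegionInfo can now only be compared on fields it still carries, which is why the families assertion above is commented out. Assertions that survive the change might look like this (hypothetical additions, not in the patch):

    byte [] hrib = Writables.getBytes(hri);
    HRegionInfo deserialized =
      (HRegionInfo)Writables.getWritable(hrib, new HRegionInfo());
    assertEquals(hri.getEncodedName(), deserialized.getEncodedName());
    assertTrue(Bytes.equals(hri.getTableName(), deserialized.getTableName()));
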
Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/TimestampTestBase.java Wed Jun  8 00:03:54 2011
@@ -36,7 +36,7 @@ public class TimestampTestBase extends H
   private static final long T1 = 100L;
   private static final long T2 = 200L;
 
-  private static final byte [] FAMILY_NAME = Bytes.toBytes("colfamily1");
+  private static final byte [] FAMILY_NAME = Bytes.toBytes("colfamily11");
   private static final byte [] QUALIFIER_NAME = Bytes.toBytes("contents");
 
   private static final byte [] ROW = Bytes.toBytes("row");

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Wed Jun  8 00:03:54 2011
@@ -181,7 +181,7 @@ public class TestFromClientSide {
      for (Map.Entry<HRegionInfo, HServerAddress> e: loadedRegions.entrySet()) {
        HRegionInfo hri = e.getKey();
        assertTrue(HConnectionManager.isRegionCached(conf,
-           hri.getTableDesc().getName(), hri.getStartKey()));
+           hri.getTableName(), hri.getStartKey()));
      }
 
      // delete the temp file

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java Wed Jun  8 00:03:54 2011
@@ -118,6 +118,7 @@ public class TestMultipleTimestamps {
 
   @Test
   public void testReseeksWithMultipleColumnOneTimestamp() throws IOException {
+    LOG.info("testReseeksWithMultipleColumnOneTimestamp");
     byte [] TABLE = Bytes.toBytes("testReseeksWithMultiple" +
     "ColumnOneTimestamps");
     byte [] FAMILY = Bytes.toBytes("event_log");
@@ -155,6 +156,8 @@ public class TestMultipleTimestamps {
   @Test
   public void testReseeksWithMultipleColumnMultipleTimestamp() throws
   IOException {
+    LOG.info("testReseeksWithMultipleColumnMultipleTimestamp");
+
     byte [] TABLE = Bytes.toBytes("testReseeksWithMultiple" +
     "ColumnMiltipleTimestamps");
     byte [] FAMILY = Bytes.toBytes("event_log");
@@ -197,6 +200,7 @@ public class TestMultipleTimestamps {
 
   @Test
   public void testReseeksWithMultipleFiles() throws IOException {
+    LOG.info("testReseeksWithMultipleFiles");
     byte [] TABLE = Bytes.toBytes("testReseeksWithMultipleFiles");
     byte [] FAMILY = Bytes.toBytes("event_log");
     byte [][] FAMILIES = new byte[][] { FAMILY };
@@ -262,8 +266,12 @@ public class TestMultipleTimestamps {
   }
 
   public void testWithVersionDeletes(boolean flushTables) throws IOException {
+    LOG.info("testWithVersionDeletes_"+
+        (flushTables ? "flush" : "noflush"));
+
     byte [] TABLE = Bytes.toBytes("testWithVersionDeletes_" +
         (flushTables ? "flush" : "noflush"));
+
     byte [] FAMILY = Bytes.toBytes("event_log");
     byte [][] FAMILIES = new byte[][] { FAMILY };
 
@@ -292,6 +300,8 @@ public class TestMultipleTimestamps {
 
   @Test
   public void testWithMultipleVersionDeletes() throws IOException {
+    LOG.info("testWithMultipleVersionDeletes");
+
     byte [] TABLE = Bytes.toBytes("testWithMultipleVersionDeletes");
     byte [] FAMILY = Bytes.toBytes("event_log");
     byte [][] FAMILIES = new byte[][] { FAMILY };

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java Wed Jun  8 00:03:54 2011
@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
@@ -89,6 +90,7 @@ public class TestScannerTimeout {
    */
   @Test
   public void test2481() throws Exception {
+    LOG.info("START ************ test2481");
     Scan scan = new Scan();
     HTable table =
       new HTable(new Configuration(TEST_UTIL.getConfiguration()), TABLE_NAME);
@@ -109,6 +111,7 @@ public class TestScannerTimeout {
       return;
     }
     fail("We should be timing out");
+    LOG.info("END ************ test2481");
   }
 
   /**
@@ -118,6 +121,7 @@ public class TestScannerTimeout {
    */
   @Test
   public void test2772() throws Exception {
+    LOG.info("START************ test2772");
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
     Scan scan = new Scan();
     // Set a very high timeout, we want to test what happens when a RS
@@ -134,6 +138,8 @@ public class TestScannerTimeout {
     Result[] results = r.next(NB_ROWS);
     assertEquals(NB_ROWS, results.length);
     r.close();
+    LOG.info("END ************ test2772");
+
   }
   
   /**
@@ -143,14 +149,24 @@ public class TestScannerTimeout {
    */
   @Test
   public void test3686a() throws Exception {
+    LOG.info("START ************ TEST3686A---1");
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
+    LOG.info("START ************ TEST3686A---1111");
+
     Scan scan = new Scan();
     scan.setCaching(SCANNER_CACHING);
-    
+    LOG.info("************ TEST3686A");
+    MetaReader.fullScanMetaAndPrint(TEST_UTIL.getHBaseCluster().getMaster().getCatalogTracker());
     HTable table = new HTable(TABLE_NAME);
+    LOG.info("START ************ TEST3686A---22");
+
     ResultScanner r = table.getScanner(scan);
+    LOG.info("START ************ TEST3686A---33");
+
     int count = 1;
     r.next();
+    LOG.info("START ************ TEST3686A---44");
+
     // Kill after one call to next(), which got 5 rows.
     rs.abort("die!");
     while(r.next() != null) {
@@ -158,6 +174,7 @@ public class TestScannerTimeout {
     }
     assertEquals(NB_ROWS, count);
     r.close();
+    LOG.info("************ END TEST3686A");
   }
   
   /**
@@ -168,6 +185,7 @@ public class TestScannerTimeout {
    */
   @Test
   public void test3686b() throws Exception {
+    LOG.info("START ************ test3686b");
     HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
     Scan scan = new Scan();
     scan.setCaching(SCANNER_CACHING);
@@ -189,5 +207,7 @@ public class TestScannerTimeout {
     }
     assertEquals(NB_ROWS, count);
     r.close();
+    LOG.info("END ************ END test3686b");
+
   }
 }
\ No newline at end of file

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/client/TestTimestamp.java Wed Jun  8 00:03:54 2011
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.Timestamp
  * run against an HRegion and against an HTable: i.e. both local and remote.
  */
 public class TestTimestamp extends HBaseClusterTestCase {
-  public static String COLUMN_NAME = "colfamily1";
+  public static String COLUMN_NAME = "colfamily11";
 
   /** constructor */
   public TestTimestamp() {

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java Wed Jun  8 00:03:54 2011
@@ -180,7 +180,8 @@ public class TestCoprocessorInterface ex
 
   HRegion reopenRegion(final HRegion closedRegion, Class<?> implClass)
       throws IOException {
-    HRegion r = new HRegion(closedRegion.getRegionDir(), closedRegion.getLog(),
+    //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
+    HRegion r = new HRegion(closedRegion.getTableDir(), closedRegion.getLog(),
         closedRegion.getFilesystem(), closedRegion.getConf(),
         closedRegion.getRegionInfo(), null);
     r.initialize();
@@ -211,9 +212,9 @@ public class TestCoprocessorInterface ex
     for(byte [] family : families) {
       htd.addFamily(new HColumnDescriptor(family));
     }
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(tableName, null, null, false);
     Path path = new Path(DIR + callingMethod);
-    HRegion r = HRegion.createHRegion(info, path, conf);
+    HRegion r = HRegion.createHRegion(info, path, conf, htd);
 
     // this following piece is a hack.
     RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java Wed Jun  8 00:03:54 2011
@@ -256,7 +256,7 @@ public class TestRegionObserverInterface
     try {
       for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
         for (HRegionInfo r : t.getRegionServer().getOnlineRegions()) {
-          if (!Arrays.equals(r.getTableDesc().getName(), tableName)) {
+          if (!Arrays.equals(r.getTableName(), tableName)) {
             continue;
           }
           RegionCoprocessorHost cph = t.getRegionServer().getOnlineRegion(r.getRegionName()).

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java Wed Jun  8 00:03:54 2011
@@ -91,9 +91,9 @@ public class TestRegionObserverStacking 
     for(byte [] family : families) {
       htd.addFamily(new HColumnDescriptor(family));
     }
-    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     Path path = new Path(DIR + callingMethod);
-    HRegion r = HRegion.createHRegion(info, path, conf);
+    HRegion r = HRegion.createHRegion(info, path, conf, htd);
     // this following piece is a hack. currently a coprocessorHost
     // is secretly loaded at OpenRegionHandler. we don't really
     // start a region server here, so just manually create cphost

Modified: hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java?rev=1133209&r1=1133208&r2=1133209&view=diff
==============================================================================
--- hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java (original)
+++ hbase/trunk/src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java Wed Jun  8 00:03:54 2011
@@ -138,7 +138,12 @@ public class TestWALObserver {
    */
   @Test
   public void testWALObserverWriteToWAL() throws Exception {
+
     HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(TEST_TABLE));
+    final HTableDescriptor htd = createBasic3FamilyHTD(Bytes.toString(TEST_TABLE));
+    HRegion region2 = HRegion.createHRegion(hri,
+            hbaseRootDir, this.conf, htd);
+
     Path basedir = new Path(this.hbaseRootDir, Bytes.toString(TEST_TABLE));
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));
@@ -190,7 +195,7 @@ public class TestWALObserver {
 
     // it's where WAL write cp should occur.
     long now = EnvironmentEdgeManager.currentTimeMillis();
-    log.append(hri, hri.getTableDesc().getName(), edit, now);
+    log.append(hri, hri.getTableName(), edit, now, htd);
 
     // the edit shall have been change now by the coprocessor.
     foundFamily0 = false;
@@ -221,16 +226,25 @@ public class TestWALObserver {
    * Test WAL replay behavior with WALObserver.
    */
   @Test
-  public void testWALObserverReplay() throws Exception {
+  public void testWALCoprocessorReplay() throws Exception {
     // WAL replay is handled at HRegion::replayRecoveredEdits(), which is
     // ultimately called by HRegion::initialize()
     byte[] tableName = Bytes.toBytes("testWALCoprocessorReplay");
+    final HTableDescriptor htd = getBasic3FamilyHTableDescriptor(Bytes.toString(tableName));
+    //final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
+    //final HRegionInfo hri1 = createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
+    final HRegionInfo hri = new HRegionInfo(tableName, null, null);
 
-    final HRegionInfo hri = createBasic3FamilyHRegionInfo(Bytes.toString(tableName));
     final Path basedir = new Path(this.hbaseRootDir, Bytes.toString(tableName));
     deleteDir(basedir);
     fs.mkdirs(new Path(basedir, hri.getEncodedName()));
 
+    final Configuration newConf = HBaseConfiguration.create(this.conf);
+
+    HRegion region2 = HRegion.createHRegion(hri,
+        hbaseRootDir, newConf, htd);
+
     //HLog wal = new HLog(this.fs, this.dir, this.oldLogDir, this.conf);
     HLog wal = createWAL(this.conf);
     //Put p = creatPutWith2Families(TEST_ROW);
@@ -238,40 +252,46 @@ public class TestWALObserver {
     long now = EnvironmentEdgeManager.currentTimeMillis();
     //addFamilyMapToWALEdit(p.getFamilyMap(), edit);
     final int countPerFamily = 1000;
-    for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+    //for (HColumnDescriptor hcd: hri.getTableDesc().getFamilies()) {
+    for (HColumnDescriptor hcd: htd.getFamilies()) {
+          //addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
+          //EnvironmentEdgeManager.getDelegate(), wal);
       addWALEdits(tableName, hri, TEST_ROW, hcd.getName(), countPerFamily,
-          EnvironmentEdgeManager.getDelegate(), wal);
+      EnvironmentEdgeManager.getDelegate(), wal, htd);
     }
-    wal.append(hri, tableName, edit, now);
+    wal.append(hri, tableName, edit, now, htd);
     // sync to fs.
     wal.sync();
 
-    final Configuration newConf = HBaseConfiguration.create(this.conf);
     User user = HBaseTestingUtility.getDifferentUser(newConf,
         ".replay.wal.secondtime");
     user.runAs(new PrivilegedExceptionAction() {
       public Object run() throws Exception {
-        runWALSplit(newConf);
+        Path p = runWALSplit(newConf);
+        LOG.info("WALSplit path == " + p);
         FileSystem newFS = FileSystem.get(newConf);
         // Make a new wal for new region open.
         HLog wal2 = createWAL(newConf);
-        HRegion region2 = new HRegion(basedir, wal2, FileSystem.get(newConf),
+        Path tableDir =
+            HTableDescriptor.getTableDir(hbaseRootDir, hri.getTableName());
+        HRegion region = new HRegion(tableDir, wal2, FileSystem.get(newConf),
           newConf, hri, TEST_UTIL.getHBaseCluster().getRegionServer(0));
-        long seqid2 = region2.initialize();
 
+        long seqid2 = region.initialize();
         SampleRegionWALObserver cp2 =
-          (SampleRegionWALObserver)region2.getCoprocessorHost().findCoprocessor(
+          (SampleRegionWALObserver)region.getCoprocessorHost().findCoprocessor(
               SampleRegionWALObserver.class.getName());
         // TODO: asserting here is problematic.
         assertNotNull(cp2);
         assertTrue(cp2.isPreWALRestoreCalled());
         assertTrue(cp2.isPostWALRestoreCalled());
-        region2.close();
+        region.close();
         wal2.closeAndDelete();
         return null;
       }
     });
   }
+
   /**
    * Test to see CP loaded successfully or not. There is a duplication
    * at TestHLog, but the purpose of that one is to see whether the loaded
@@ -301,7 +321,7 @@ public class TestWALObserver {
       HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]);
       htd.addFamily(a);
     }
-    return new HRegionInfo(htd, null, null, false);
+    return new HRegionInfo(htd.getName(), null, null, false);
   }
 
   /*
@@ -356,7 +376,7 @@ public class TestWALObserver {
   }
   private void addWALEdits (final byte [] tableName, final HRegionInfo hri,
       final byte [] rowName, final byte [] family,
-      final int count, EnvironmentEdge ee, final HLog wal)
+      final int count, EnvironmentEdge ee, final HLog wal, final HTableDescriptor htd)
   throws IOException {
     String familyStr = Bytes.toString(family);
     for (int j = 0; j < count; j++) {
@@ -365,8 +385,30 @@ public class TestWALObserver {
       WALEdit edit = new WALEdit();
       edit.add(new KeyValue(rowName, family, qualifierBytes,
         ee.currentTimeMillis(), columnBytes));
-      wal.append(hri, tableName, edit, ee.currentTimeMillis());
+      wal.append(hri, tableName, edit, ee.currentTimeMillis(), htd);
     }
   }
+  private HTableDescriptor getBasic3FamilyHTableDescriptor(
+      final String tableName) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+
+    for (int i = 0; i < TEST_FAMILY.length; i++ ) {
+      HColumnDescriptor a = new HColumnDescriptor(TEST_FAMILY[i]);
+      htd.addFamily(a);
+    }
+    return htd;
+  }
+
+  private HTableDescriptor createBasic3FamilyHTD(final String tableName) {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
+    htd.addFamily(a);
+    HColumnDescriptor b = new HColumnDescriptor(Bytes.toBytes("b"));
+    htd.addFamily(b);
+    HColumnDescriptor c = new HColumnDescriptor(Bytes.toBytes("c"));
+    htd.addFamily(c);
+    return htd;
+  }
+
 }
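All the WAL hunks above make the same signature change: HLog.append now receives the HTableDescriptor alongside the edit, so WAL entries can be written without consulting the HRegionInfo for schema. A sketch of the new call (wal, hri, and htd assumed constructed as in the test):

    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(row, family, qualifier,
        EnvironmentEdgeManager.currentTimeMillis(), value));
    wal.append(hri, htd.getName(), edit,
        EnvironmentEdgeManager.currentTimeMillis(), htd);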