Posted to commits@hbase.apache.org by li...@apache.org on 2014/04/19 20:18:29 UTC

svn commit: r1588690 - in /hbase/branches/0.89-fb/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/client/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/master/handler/ main/java/org/apache/hadoop/...

Author: liyin
Date: Sat Apr 19 18:18:28 2014
New Revision: 1588690

URL: http://svn.apache.org/r1588690
Log:
[HBASE-10343] Record the sequence id change when region opens on region server.

Author: everyoung

Summary:
- Add historian to ROOT table.
- Add immutable HRegionSeqidTransition class and TestHRegionSeqidTransition to
  test ser/deser and region reopen.
- Add serverInfo and RegionInfo as Jiqing suggested.
- Fix TestDistributedLogSplitting, TestMutationWriteToWAL.
- Add config to enable/disable ROOT_DESCRIPTOR changes.

Test Plan: all tests and a new unit test

Reviewers: aaiyer, liyintang, rshroff, jiqingt

Reviewed By: aaiyer

CC: hbase-dev@, andrewcox, fan

Differential Revision: https://phabricator.fb.com/D1232574

Task ID: 3946433

Added:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionSeqidTransition.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionSeqidTransition.java
Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMerge.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/TableServers.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKClusterStateRecovery.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/handler/MasterOpenRegionHandler.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
    hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HConstants.java Sat Apr 19 18:18:28 2014
@@ -549,6 +549,12 @@ public final class HConstants {
   /** The favored nodes column qualifier*/
   public static final byte [] FAVOREDNODES_QUALIFIER = Bytes.toBytes("favorednodes");
 
+  /** The last region seqid qualifier*/
+  public static final byte [] LAST_SEQID_QUALIFIER = Bytes.toBytes("lastregionseqid");
+
+  /** The next region seqid qualifier*/
+  public static final byte [] NEXT_SEQID_QUALIFIER = Bytes.toBytes("nextregionseqid");
+
   // Other constants
 
   /**

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMerge.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMerge.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HMerge.java Sat Apr 19 18:18:28 2014
@@ -330,9 +330,13 @@ class HMerge {
           HConstants.ROOT_TABLE_NAME);
 
       // Scan root region to find all the meta regions
-
-      root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
-          HRegionInfo.ROOT_REGIONINFO, null);
+      if (HTableDescriptor.isMetaregionSeqidRecordEnabled(conf)) {
+        root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
+            HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN, null);
+      } else {
+        root = HRegion.newHRegion(rootTableDir, hlog, fs, conf,
+            HRegionInfo.ROOT_REGIONINFO, null);
+      }
       root.initialize();
 
       Scan scan = new Scan();

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java Sat Apr 19 18:18:28 2014
@@ -122,7 +122,12 @@ public class HRegionInfo extends Version
   public static final HRegionInfo ROOT_REGIONINFO =
     new HRegionInfo(0L, HTableDescriptor.ROOT_TABLEDESC);
 
-  /** Encoded name for the root region. This is always the same. */
+  /** HRegionInfo for root region with historian column*/
+  public static final HRegionInfo ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN =
+    new HRegionInfo(0L, HTableDescriptor.ROOT_TABLEDESC_WITH_HISTORIAN_COLUMN);
+
+  /** Encoded name for the root region. This is always the same.
+   * ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same getEncodedName(). */
   public static final String ROOT_REGION_ENCODED_NAME_STR =
       HRegionInfo.ROOT_REGIONINFO.getEncodedName();
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java Sat Apr 19 18:18:28 2014
@@ -38,6 +38,7 @@ import java.util.TreeSet;
 import com.google.common.collect.ImmutableSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -143,6 +144,10 @@ public class HTableDescriptor implements
   private volatile Boolean root = null;
   private Boolean isDeferredLog = null;
 
+  /** Master switch to enable the historian column family in the ROOT table */
+  public static final String METAREGION_SEQID_RECORD_ENABLED =
+    "metaregion.seqid.record.enabled";
+
   // Key is hash of the family name.
   public final Map<byte [], HColumnDescriptor> families =
     new TreeMap<byte [], HColumnDescriptor>(Bytes.BYTES_RAWCOMPARATOR);
@@ -821,6 +826,20 @@ public class HTableDescriptor implements
 
   /** Table descriptor for <core>-ROOT-</code> catalog table */
   public static final HTableDescriptor ROOT_TABLEDESC = new HTableDescriptor(
+    HConstants.ROOT_TABLE_NAME,
+    new HColumnDescriptor[] {
+      new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+        // Ten is an arbitrary number.  Keep versions to help debugging.
+        .setMaxVersions(10)
+        .setInMemory(true)
+        .setBlocksize(8 * 1024)
+        .setTimeToLive(HConstants.FOREVER)
+        .setScope(HConstants.REPLICATION_SCOPE_LOCAL),
+    });
+
+  /** Table descriptor for <code>-ROOT-</code> catalog table with the historian column
+   * introduced to record sequence id transitions of the meta table. */
+  public static final HTableDescriptor ROOT_TABLEDESC_WITH_HISTORIAN_COLUMN = new HTableDescriptor(
       HConstants.ROOT_TABLE_NAME,
       new HColumnDescriptor[] {
           new HColumnDescriptor(HConstants.CATALOG_FAMILY)
@@ -829,6 +848,12 @@ public class HTableDescriptor implements
               .setInMemory(true)
               .setBlocksize(8 * 1024)
               .setTimeToLive(HConstants.FOREVER)
+              .setScope(HConstants.REPLICATION_SCOPE_LOCAL),
+          new HColumnDescriptor(HConstants.CATALOG_HISTORIAN_FAMILY)
+              .setMaxVersions(HConstants.ALL_VERSIONS)
+              .setBlocksize(8 * 1024)
+              // 13 weeks = 3 months TTL
+              .setTimeToLive(13 * HConstants.WEEK_IN_SECONDS)
               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
       });
 
@@ -844,7 +869,8 @@ public class HTableDescriptor implements
           new HColumnDescriptor(HConstants.CATALOG_HISTORIAN_FAMILY)
               .setMaxVersions(HConstants.ALL_VERSIONS)
               .setBlocksize(8 * 1024)
-              .setTimeToLive(HConstants.WEEK_IN_SECONDS)
+              // 13 weeks = 3 months TTL
+              .setTimeToLive(13 * HConstants.WEEK_IN_SECONDS)
               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
       });
 
@@ -924,4 +950,12 @@ public class HTableDescriptor implements
     }
     return this.serverSet.getServers();
   }
+
+  /**
+   * @param conf the configuration to consult; may be null
+   * @return true if the meta region seqid recording is enabled
+   */
+  public static boolean isMetaregionSeqidRecordEnabled(Configuration conf) {
+    return conf != null ? conf.getBoolean(METAREGION_SEQID_RECORD_ENABLED, false) : false;
+  }
 }
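
The switch above is read straight from the Configuration, so it can be turned on via
metaregion.seqid.record.enabled in hbase-site.xml or programmatically. A minimal sketch,
not part of this patch, using only the constant and helper added above (the wrapper class
name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;

    public class SeqidRecordSwitchExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to setting metaregion.seqid.record.enabled=true in hbase-site.xml;
        // isMetaregionSeqidRecordEnabled() defaults to false when the key is absent.
        conf.setBoolean(HTableDescriptor.METAREGION_SEQID_RECORD_ENABLED, true);
        System.out.println("meta region seqid recording enabled: "
            + HTableDescriptor.isMetaregionSeqidRecordEnabled(conf));
      }
    }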

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/TableServers.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/TableServers.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/TableServers.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/client/TableServers.java Sat Apr 19 18:18:28 2014
@@ -584,7 +584,11 @@ public class TableServers implements Ser
   public HTableDescriptor getHTableDescriptor(StringBytes tableName)
       throws IOException {
     if (tableName.equals(HConstants.ROOT_TABLE_NAME_STRINGBYTES)) {
-      return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC);
+      if (HTableDescriptor.isMetaregionSeqidRecordEnabled(conf)) {
+        return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC_WITH_HISTORIAN_COLUMN);
+      } else {
+        return new UnmodifyableHTableDescriptor(HTableDescriptor.ROOT_TABLEDESC);
+      }
     }
     if (tableName.equals(HConstants.META_TABLE_NAME_STRINGBYTES)) {
       return HTableDescriptor.META_TABLEDESC;
@@ -1295,6 +1299,7 @@ private HRegionLocation locateMetaInRoot
         HRegionInterface server = getHRegionConnection(rootRegionAddress);
         // if this works, then we're good, and we have an acceptable address,
         // so we can stop doing retries and return the result.
+        // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same getRegionName()
         server.getRegionInfo(HRegionInfo.ROOT_REGIONINFO.getRegionName());
         if (LOG.isDebugEnabled()) {
           LOG.debug("Found ROOT at " + rootRegionAddress);
@@ -1329,7 +1334,11 @@ private HRegionLocation locateMetaInRoot
     }
 
     // return the region location
-    return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, rootRegionAddress);
+    if (HTableDescriptor.isMetaregionSeqidRecordEnabled(conf)) {
+      return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN, rootRegionAddress);
+    } else {
+      return new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, rootRegionAddress);
+    }
   }
 
   @Override

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Sat Apr 19 18:18:28 2014
@@ -622,7 +622,9 @@ public class HMaster extends HasThread i
       // created here in bootstap and it'll need to be cleaned up.  Better to
       // not make it in first place.  Turn off block caching for bootstrap.
       // Enable after.
-      HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
+      HRegionInfo rootHRI = HTableDescriptor.isMetaregionSeqidRecordEnabled(c) ?
+        new HRegionInfo(HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN) :
+        new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
       setInfoFamilyCaching(rootHRI, false);
       HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
       setInfoFamilyCaching(metaHRI, false);

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionOpen.java Sat Apr 19 18:18:28 2014
@@ -25,9 +25,12 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.HRegionSeqidTransition;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
 
 /**
@@ -37,6 +40,7 @@ import org.apache.hadoop.hbase.zookeeper
  */
 public class ProcessRegionOpen extends ProcessRegionStatusChange {
   protected final HServerInfo serverInfo;
+  protected final HRegionSeqidTransition seqidTransition;
 
   /**
    * @param master
@@ -44,9 +48,21 @@ public class ProcessRegionOpen extends P
    * @param regionInfo
    */
   public ProcessRegionOpen(HMaster master, HServerInfo info,
-      HRegionInfo regionInfo) {
+                           HRegionInfo regionInfo) {
+    this(master, info, regionInfo, null);
+  }
+
+  /**
+   * @param master
+   * @param info
+   * @param regionInfo
+   * @param seqidTran region seqid transition information
+   */
+  public ProcessRegionOpen(HMaster master, HServerInfo info,
+                           HRegionInfo regionInfo, HRegionSeqidTransition seqidTran) {
     super(master, info.getServerName(), regionInfo);
     this.serverInfo = info;
+    this.seqidTransition = seqidTran;
   }
 
   @Override
@@ -99,6 +115,7 @@ public class ProcessRegionOpen extends P
   void writeToMeta(MetaRegion region) throws IOException {
     HRegionInterface server =
         master.getServerConnection().getHRegionConnection(region.getServer());
+    String seqidLog = null;
     LOG.info(regionInfo.getRegionNameAsString() + " open on " + serverInfo.getServerName());
     // Register the newly-available Region's location.
     Put p = new Put(regionInfo.getRegionName());
@@ -106,11 +123,51 @@ public class ProcessRegionOpen extends P
         Bytes.toBytes(serverInfo.getHostnamePort()));
     p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
         Bytes.toBytes(serverInfo.getStartCode()));
+    if (seqidTransition != null) {
+      // If this is not the meta table, record the transition.
+      // If it is the meta table and meta region seqid recording is enabled, record it too.
+      // Otherwise ROOT does not have the historian family, so skip the append.
+      if (isMetaTable &&
+          !HTableDescriptor.isMetaregionSeqidRecordEnabled(master.getConfiguration())) {
+        seqidLog = ", sequence id of meta region not enabled, not recording.";
+      } else {
+        StringBuilder sb = new StringBuilder();
+        p.add(HConstants.CATALOG_HISTORIAN_FAMILY, HConstants.SERVER_QUALIFIER,
+              Bytes.toBytes(serverInfo.getHostnamePort()));
+        sb.append(Bytes.toString(HConstants.CATALOG_HISTORIAN_FAMILY))
+          .append(":").append(Bytes.toString(HConstants.SERVER_QUALIFIER))
+          .append("=").append(serverInfo.getHostnamePort());
+        p.add(HConstants.CATALOG_HISTORIAN_FAMILY, HConstants.STARTCODE_QUALIFIER,
+              Bytes.toBytes(serverInfo.getStartCode()));
+        sb.append(", ").append(Bytes.toString(HConstants.CATALOG_HISTORIAN_FAMILY))
+          .append(":").append(Bytes.toString(HConstants.STARTCODE_QUALIFIER))
+          .append("=").append(serverInfo.getStartCode());
+        p.add(HConstants.CATALOG_HISTORIAN_FAMILY,
+              HConstants.LAST_SEQID_QUALIFIER, Bytes.toBytes(seqidTransition.getLastSeqid()));
+        sb.append(", ").append(Bytes.toString(HConstants.CATALOG_HISTORIAN_FAMILY))
+          .append(":").append(Bytes.toString(HConstants.LAST_SEQID_QUALIFIER)).append("=")
+          .append(seqidTransition.getLastSeqid());
+        p.add(HConstants.CATALOG_HISTORIAN_FAMILY,
+              HConstants.NEXT_SEQID_QUALIFIER, Bytes.toBytes(seqidTransition.getNextSeqid()));
+        sb.append(", ").append(Bytes.toString(HConstants.CATALOG_HISTORIAN_FAMILY))
+          .append(":").append(Bytes.toString(HConstants.NEXT_SEQID_QUALIFIER)).append("=")
+          .append(seqidTransition.getNextSeqid());
+        p.add(HConstants.CATALOG_HISTORIAN_FAMILY,
+              HConstants.REGIONINFO_QUALIFIER, Writables.getBytes(regionInfo));
+        sb.append(", ").append(Bytes.toString(HConstants.CATALOG_HISTORIAN_FAMILY))
+          .append(":").append(Bytes.toString(HConstants.REGIONINFO_QUALIFIER)).append("=")
+          .append("[regionInfo object of ").append(regionInfo.getRegionNameAsString()).append("]");
+
+        seqidLog = sb.toString();
+      }
+    }
     server.put(region.getRegionName(), p);
-    LOG.info("Updated row " + regionInfo.getRegionNameAsString() + " in region "
-        + Bytes.toStringBinary(region.getRegionName())
-        + " with startcode=" + serverInfo.getStartCode()
-        + ", server=" + serverInfo.getHostnamePort());
+    LOG.info("Updated row " + regionInfo.getRegionNameAsString() + " in region " +
+        Bytes.toString(region.getRegionName()) + " with " +
+        Bytes.toString(HConstants.CATALOG_FAMILY) + ":startcode=" + serverInfo.getStartCode() +
+        ", " + Bytes.toString(HConstants.CATALOG_FAMILY) +
+        ":server=" + serverInfo.getHostnamePort() +
+        ((seqidLog == null) ? " and NO sequence id transition" : ", " + seqidLog) + ".");
     this.master.getServerManager().getRegionChecker().becameOpened(regionInfo);
   }
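
To make the Put above concrete: once the historian columns are written, the recorded
transitions can be read back from the catalog table with the plain client API. A hedged
sketch, not part of this patch; the family and qualifiers are the ones used in this change,
while the wrapper class and the scan itself are only illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SeqidHistorianReader {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
        Scan scan = new Scan();
        scan.addFamily(HConstants.CATALOG_HISTORIAN_FAMILY);
        ResultScanner scanner = meta.getScanner(scan);
        try {
          for (Result r : scanner) {
            byte[] last = r.getValue(HConstants.CATALOG_HISTORIAN_FAMILY,
                HConstants.LAST_SEQID_QUALIFIER);
            byte[] next = r.getValue(HConstants.CATALOG_HISTORIAN_FAMILY,
                HConstants.NEXT_SEQID_QUALIFIER);
            if (last != null && next != null) {
              // Values were written with Bytes.toBytes(long) in writeToMeta above.
              System.out.println(Bytes.toStringBinary(r.getRow()) + ": "
                  + Bytes.toLong(last) + " -> " + Bytes.toLong(next));
            }
          }
        } finally {
          scanner.close();
        }
      }
    }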
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessRegionStatusChange.java Sat Apr 19 18:18:28 2014
@@ -20,6 +20,7 @@
 package org.apache.hadoop.hbase.master;
 
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 
 /**
  * Abstract class that performs common operations for
@@ -68,9 +69,14 @@ abstract class ProcessRegionStatusChange
 
   protected MetaRegion getMetaRegion() {
     if (isMetaTable) {
+      // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName()
       this.metaRegionName = HRegionInfo.ROOT_REGIONINFO.getRegionName();
-      this.metaRegion = new MetaRegion(master.getRegionManager().getRootRegionLocation(),
-          HRegionInfo.ROOT_REGIONINFO);
+      this.metaRegion = HTableDescriptor.isMetaregionSeqidRecordEnabled(
+          master.getConfiguration()) ?
+            new MetaRegion(master.getRegionManager().getRootRegionLocation(),
+                HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN) :
+            new MetaRegion(master.getRegionManager().getRootRegionLocation(),
+                HRegionInfo.ROOT_REGIONINFO);
     } else {
       this.metaRegion =
         master.getRegionManager().getFirstMetaRegionForRegion(regionInfo);

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java Sat Apr 19 18:18:28 2014
@@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -347,6 +348,7 @@ class ProcessServerShutdown extends Regi
       scan.addFamily(HConstants.CATALOG_FAMILY);
       scan.setCaching(1000);
       scan.setCacheBlocks(true);
+      // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName
       return scanMetaRegion(server, scan,
           HRegionInfo.ROOT_REGIONINFO.getRegionName());
     }
@@ -459,7 +461,9 @@ class ProcessServerShutdown extends Regi
       LOG.debug(this.toString() + ". Begin rescan Root ");
       Boolean result = new ScanRootRegion(
           new MetaRegion(master.getRegionManager().getRootRegionLocation(),
-              HRegionInfo.ROOT_REGIONINFO), this.master).doWithRetries();
+              (HTableDescriptor.isMetaregionSeqidRecordEnabled(master.getConfiguration()) ?
+              HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN : HRegionInfo.ROOT_REGIONINFO)),
+                  this.master).doWithRetries();
       if (result == null || result.booleanValue() == false) {
         LOG.debug("Root scan failed " + this);
         return RegionServerOperationResult.OPERATION_DELAYED;

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RegionManager.java Sat Apr 19 18:18:28 2014
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HServerLoad;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.executor.HBaseEventHandler.HBaseEventType;
 import org.apache.hadoop.hbase.executor.RegionTransitionEventData;
@@ -261,6 +262,7 @@ public class RegionManager {
     unsetRootRegion();
     if (!master.isClusterShutdownRequested()) {
       synchronized (regionsInTransition) {
+        // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName
         String regionName = HRegionInfo.ROOT_REGIONINFO.getRegionNameAsString();
         byte[] data = null;
         try {
@@ -268,10 +270,17 @@ public class RegionManager {
         } catch (IOException e) {
           LOG.error("Error creating event data for " + HBaseEventType.M2ZK_REGION_OFFLINE, e);
         }
+        // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName
         zkWrapper.createOrUpdateUnassignedRegion(
             HRegionInfo.ROOT_REGIONINFO.getEncodedName(), data);
         LOG.debug("Created UNASSIGNED zNode " + regionName + " in state " + HBaseEventType.M2ZK_REGION_OFFLINE);
-        RegionState s = new RegionState(HRegionInfo.ROOT_REGIONINFO, RegionState.State.UNASSIGNED);
+        RegionState s;
+        if (HTableDescriptor.isMetaregionSeqidRecordEnabled(master.getConfiguration())) {
+          s = new RegionState(HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN,
+                  RegionState.State.UNASSIGNED);
+        } else {
+          s = new RegionState(HRegionInfo.ROOT_REGIONINFO, RegionState.State.UNASSIGNED);
+        }
         regionsInTransition.put(regionName, s);
         LOG.info("ROOT inserted into regionsInTransition");
       }
@@ -590,6 +599,7 @@ public class RegionManager {
 
     // Assign ROOT region if ROOT region is offline.
     synchronized (this.regionsInTransition) {
+      // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName
       rootState = regionsInTransition.get(HRegionInfo.ROOT_REGIONINFO
           .getRegionNameAsString());
     }
@@ -734,6 +744,7 @@ public class RegionManager {
     int nonPreferredAssignmentCount = 0;
     // Handle if root is unassigned... only assign root if root is offline.
     synchronized (this.regionsInTransition) {
+      // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName
       rootState = regionsInTransition.get(HRegionInfo.ROOT_REGIONINFO
           .getRegionNameAsString());
     }
@@ -1054,7 +1065,8 @@ public class RegionManager {
             Bytes.toString(HConstants.ROOT_TABLE_NAME));
       }
       metaRegions.add(new MetaRegion(rootRegionLocation.get().getServerAddress(),
-          HRegionInfo.ROOT_REGIONINFO));
+          HTableDescriptor.isMetaregionSeqidRecordEnabled(master.getConfiguration()) ?
+          HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN : HRegionInfo.ROOT_REGIONINFO));
     } else {
       if (!areAllMetaRegionsOnline()) {
         throw new NotAllMetaRegionsOnlineException();
@@ -1089,7 +1101,8 @@ public class RegionManager {
     if (row.length > prefixlen &&
      Bytes.compareTo(META_REGION_PREFIX, 0, prefixlen, row, 0, prefixlen) == 0) {
       return new MetaRegion(this.master.getRegionManager().getRootRegionLocation(),
-        HRegionInfo.ROOT_REGIONINFO);
+          HTableDescriptor.isMetaregionSeqidRecordEnabled(master.getConfiguration()) ?
+            HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN : HRegionInfo.ROOT_REGIONINFO);
     }
     return this.onlineMetaRegions.floorEntry(row).getValue();
   }
@@ -1703,6 +1716,7 @@ public class RegionManager {
     writeRootRegionLocationToZooKeeper(hsi);
     synchronized (rootRegionLocation) {
       // the root region has been assigned, remove it from transition in ZK
+      // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName
       zkWrapper.deleteUnassignedRegion(HRegionInfo.ROOT_REGIONINFO.getEncodedName());
       rootRegionLocation.set(new HServerInfo(hsi));
       rootRegionLocation.notifyAll();

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/RootScanner.java Sat Apr 19 18:18:28 2014
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master;
 
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 
 import java.io.IOException;
@@ -52,7 +53,12 @@ class RootScanner extends BaseScanner {
       synchronized(scannerLock) {
         HServerAddress rootRegionLocation = master.getRegionManager().getRootRegionLocation();
         if (rootRegionLocation != null) {
-          scanRegion(new MetaRegion(rootRegionLocation, HRegionInfo.ROOT_REGIONINFO));
+          if (HTableDescriptor.isMetaregionSeqidRecordEnabled(master.getConfiguration())) {
+            scanRegion(new MetaRegion(rootRegionLocation,
+                HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN));
+          } else {
+            scanRegion(new MetaRegion(rootRegionLocation, HRegionInfo.ROOT_REGIONINFO));
+          }
         }
       }
     } catch (IOException e) {
@@ -79,4 +85,4 @@ class RootScanner extends BaseScanner {
   protected void maintenanceScan() {
     scanRoot();
   }
-}
\ No newline at end of file
+}

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java Sat Apr 19 18:18:28 2014
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.client.Re
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.master.RegionManager.RegionState;
+import org.apache.hadoop.hbase.regionserver.HRegionSeqidTransition;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -818,13 +819,14 @@ public class ServerManager implements Co
   /*
    * Region server is reporting that a region is now opened
    * @param serverInfo
-   * @param region
+   * @param hmsg
    * @param returnMsgs
    */
   public void processRegionOpen(HServerInfo serverInfo,
-      HRegionInfo region, ArrayList<HMsg> returnMsgs) {
+      HMsg hmsg, ArrayList<HMsg> returnMsgs) {
 
     boolean duplicateAssignment = false;
+    HRegionInfo region = hmsg.getRegionInfo();
     RegionManager regionManager = master.getRegionManager();
     synchronized (regionManager) {
       if (!regionManager.isUnassigned(region) &&
@@ -894,7 +896,8 @@ public class ServerManager implements Co
           // meta table to be updated.
           regionManager.setOpen(region.getRegionNameAsString());
           RegionServerOperation op =
-              new ProcessRegionOpen(master, serverInfo, region);
+              new ProcessRegionOpen(master, serverInfo, region,
+                  HRegionSeqidTransition.fromBytes(hmsg.getMessage()));
           master.getRegionServerOperationQueue().put(op);
         }
       }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKClusterStateRecovery.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKClusterStateRecovery.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKClusterStateRecovery.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/ZKClusterStateRecovery.java Sat Apr 19 18:18:28 2014
@@ -275,6 +275,7 @@ public class ZKClusterStateRecovery {
       // processing as dead. In that case we do need to reassign. This logic is similar to
       // what is done in BaseScanner.checkAssigned.
       String serverName = rootServerInfo.getServerName();
+      // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName
       if (regionManager.regionIsInTransition(
           HRegionInfo.ROOT_REGIONINFO.getRegionNameAsString())) {
         // Already in transition, we will wait until it is assigned.

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/handler/MasterOpenRegionHandler.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/handler/MasterOpenRegionHandler.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/handler/MasterOpenRegionHandler.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/master/handler/MasterOpenRegionHandler.java Sat Apr 19 18:18:28 2014
@@ -60,8 +60,7 @@ public class MasterOpenRegionHandler ext
    *                         entry in ZK.
    */
   @Override
-  public void process()
-  {
+  public void process() {
     LOG.debug("Event = " + getHBEvent() + ", region = " + regionName);
     if(this.getHBEvent() == HBaseEventType.RS2ZK_REGION_OPENING) {
       handleRegionOpeningEvent();
@@ -89,7 +88,7 @@ public class MasterOpenRegionHandler ext
       serverInfo = HServerInfo.fromServerName(hbEventData.getRsName());
     }
     ArrayList<HMsg> returnMsgs = new ArrayList<HMsg>();
-    serverManager.processRegionOpen(serverInfo, hbEventData.getHmsg().getRegionInfo(), returnMsgs);
+    serverManager.processRegionOpen(serverInfo, hbEventData.getHmsg(), returnMsgs);
     if(returnMsgs.size() > 0) {
       LOG.debug("Open region tried to send message: " + returnMsgs.get(0).getType() + 
                 " about " + returnMsgs.get(0).getRegionInfo().getRegionNameAsString());

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Sat Apr 19 18:18:28 2014
@@ -3304,6 +3304,14 @@ public class HRegion implements HeapSize
     long seqid = r.initialize();
     if (log != null) {
       log.setSequenceNumber(seqid);
+      // Record the seqid transition in the hlog.
+      // Strictly, the next sequence id is getSequenceNumber() + 1, because
+      // obtainSeqNum in HLog.java does incrementAndGet when writeSeqidTransition
+      // is called on the next line.
+      HRegionSeqidTransition seqidTransition =
+          new HRegionSeqidTransition(seqid-1, log.getSequenceNumber() + 1);
+      // no serverInfo b/c outside an HRS context
+      log.writeSeqidTransition(seqidTransition, null, r.getRegionInfo());
     }
     return r;
   }
@@ -3989,7 +3997,12 @@ public class HRegion implements HeapSize
     String metaStr = Bytes.toString(HConstants.META_TABLE_NAME);
     // Currently expects tables have one region only.
     if (p.getName().startsWith(rootStr)) {
-      region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null);
+      if (HTableDescriptor.isMetaregionSeqidRecordEnabled(c)) {
+        region = HRegion.newHRegion(p, log, fs, c,
+            HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN, null);
+      } else {
+        region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null);
+      }
     } else if (p.getName().startsWith(metaStr)) {
       region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO,
           null);

Added: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionSeqidTransition.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionSeqidTransition.java?rev=1588690&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionSeqidTransition.java (added)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionSeqidTransition.java Sat Apr 19 18:18:28 2014
@@ -0,0 +1,111 @@
+/**
+ * Copyright 2014 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * HRegionSeqidTransition records the last and next hlog sequence ids of a region
+ * during region opening and moving.
+ */
+public final class HRegionSeqidTransition {
+  private static final Log LOG = LogFactory.getLog(HRegionSeqidTransition.class);
+  private static final long VERSION = 1;
+  private final long lastSeqid;
+  private final long nextSeqid;
+
+  public HRegionSeqidTransition(final long lastSeqid, final long nextSeqid) {
+    if ((lastSeqid >= 0) && (nextSeqid >= 0)) {
+      this.lastSeqid = lastSeqid;
+      this.nextSeqid = nextSeqid;
+    } else {
+      throw new IllegalArgumentException("Seqids cannot be negative!");
+    }
+  }
+
+  public long getLastSeqid() {
+    return lastSeqid;
+  }
+
+  public long getNextSeqid() {
+    return nextSeqid;
+  }
+
+  /**
+   * @param fromByte a byte array containing the version, the last seqid and the next seqid
+   * @throws java.lang.IllegalArgumentException if the array is too short or the version is unknown
+   */
+  public static HRegionSeqidTransition fromBytes(byte[] fromByte) throws IllegalArgumentException {
+    if (fromByte == null) {
+      return null;
+    }
+    if (fromByte.length >= 3 * Bytes.SIZEOF_LONG) {
+      if (Bytes.toLong(fromByte, 0) == VERSION) {
+        return new HRegionSeqidTransition(
+            Bytes.toLong(fromByte, Bytes.SIZEOF_LONG),
+            Bytes.toLong(fromByte, 2 * Bytes.SIZEOF_LONG));
+      } else {
+        // can be changed to deal versions later
+        throw new IllegalArgumentException("Only version 1 exists!!");
+      }
+    } else {
+      throw new IllegalArgumentException("Bytes array too short!");
+    }
+  }
+
+  /**
+   * @return bytes containing version and both seqids, null if not valid
+   */
+  public static byte[] toBytes(final HRegionSeqidTransition instance) {
+    if (instance == null) {
+      return null;
+    }
+    byte[] outBytes = new byte[3 * Bytes.SIZEOF_LONG];
+    int pos = Bytes.putLong(outBytes, 0, VERSION);
+    pos = Bytes.putLong(outBytes, pos, instance.lastSeqid);
+    Bytes.putLong(outBytes, pos, instance.nextSeqid);
+    return outBytes;
+  }
+
+  public String toString() {
+    return "Region sequence id transition: " + lastSeqid + "->" + nextSeqid +".";
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    HRegionSeqidTransition other = (HRegionSeqidTransition) obj;
+
+    return (VERSION == other.VERSION) && (lastSeqid == other.lastSeqid) &&
+      (nextSeqid == other.nextSeqid);
+  }
+}
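
For illustration, a minimal round trip over the wire format defined above (three longs:
VERSION, lastSeqid, nextSeqid). The class and its static helpers are the ones added in
this patch; the surrounding main() is only a sketch:

    import org.apache.hadoop.hbase.regionserver.HRegionSeqidTransition;

    public class SeqidTransitionRoundTrip {
      public static void main(String[] args) {
        // A region that last used seqid 199 and resumes at 210 after reopening.
        HRegionSeqidTransition before = new HRegionSeqidTransition(199L, 210L);

        // toBytes() lays out three longs (24 bytes): VERSION, lastSeqid, nextSeqid.
        byte[] wire = HRegionSeqidTransition.toBytes(before);

        // fromBytes() validates the length and version before reconstructing;
        // null in, null out, matching how HMsg carries an optional payload.
        HRegionSeqidTransition after = HRegionSeqidTransition.fromBytes(wire);

        System.out.println(after);                 // Region sequence id transition: 199->210.
        System.out.println(before.equals(after));  // true
      }
    }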

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Sat Apr 19 18:18:28 2014
@@ -825,8 +825,14 @@ public class HRegionServer implements HR
           if (rootServer != null) {
             // By setting the root region location, we bypass the wait imposed
             // on HTable for all regions being assigned.
-            this.connection.setRootRegionLocation(
-                new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, rootServer));
+            if (HTableDescriptor.isMetaregionSeqidRecordEnabled(conf)) {
+              this.connection.setRootRegionLocation(
+                  new HRegionLocation(HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN,
+                      rootServer));
+            } else {
+              this.connection.setRootRegionLocation(
+                  new HRegionLocation(HRegionInfo.ROOT_REGIONINFO, rootServer));
+            }
             haveRootRegion.set(true);
           }
         }
@@ -2360,6 +2366,7 @@ public class HRegionServer implements HR
     RSZookeeperUpdater zkUpdater = new RSZookeeperUpdater(
         this.zooKeeperWrapper, serverInfo.getServerName(),
           regionInfo.getEncodedName());
+    HRegionSeqidTransition seqidTransition = null;
     if (region == null) {
       try {
         zkUpdater.startRegionOpenEvent(null, true);
@@ -2371,9 +2378,19 @@ public class HRegionServer implements HR
           hLogIndex = Integer.valueOf((this.currentHLogIndex++) % (this.hlogs.length));
           this.regionNameToHLogIDMap.put(regionInfo.getRegionNameAsString(), hLogIndex);
         }
-        region = instantiateRegion(regionInfo, this.hlogs[hLogIndex.intValue()]);
+
+        ArrayList<HRegionSeqidTransition> seqidTransitionList =
+          new ArrayList<HRegionSeqidTransition>();
+        region = instantiateRegion(regionInfo,
+            this.hlogs[hLogIndex.intValue()], seqidTransitionList);
+
+        if (!seqidTransitionList.isEmpty()) {
+          seqidTransition = seqidTransitionList.get(0);
+        }
         LOG.info("Initiate the region: " + regionInfo.getRegionNameAsString() + " with HLog #" +
-            hLogIndex);
+            hLogIndex + ((seqidTransition == null) ?
+                " and no sequence id transition recorded." :
+                " and recorded " + seqidTransition));
 
         // Set up the favorite nodes for all the HFile for that region
         setFavoredNodes(region, favoredNodes);
@@ -2437,7 +2454,8 @@ public class HRegionServer implements HR
       }
     }
     try {
-      HMsg hmsg = new HMsg(HMsg.Type.MSG_REPORT_OPEN, regionInfo);
+      HMsg hmsg = new HMsg(HMsg.Type.MSG_REPORT_OPEN, regionInfo,
+          HRegionSeqidTransition.toBytes(seqidTransition));
       zkUpdater.finishRegionOpenEvent(hmsg);
     } catch (IOException e) {
       try {
@@ -2465,11 +2483,13 @@ public class HRegionServer implements HR
   /*
    * @param regionInfo RegionInfo for the Region we're to instantiate and
    * initialize.
-   * @param wal Set into here the regions' seqid.
+   * @param hlog the HLog whose sequence number is advanced to the region's seqid.
+   * @param seqidTransitionList a list used to pass back the region's seqid transition.
    * @return
    * @throws IOException
    */
-  protected HRegion instantiateRegion(final HRegionInfo regionInfo, final HLog hlog)
+  protected HRegion instantiateRegion(final HRegionInfo regionInfo, final HLog hlog,
+      ArrayList<HRegionSeqidTransition> seqidTransitionList)
   throws IOException {
     Path dir =
       HTableDescriptor.getTableDir(rootDir, regionInfo.getTableDesc().getName());
@@ -2482,9 +2502,33 @@ public class HRegionServer implements HR
       }
     });
     // If a wal and its seqid is < that of new region, use new regions seqid.
-    if (hlog != null) {
-      if (seqid > hlog.getSequenceNumber()) hlog.setSequenceNumber(seqid);
+    if (hlog != null && seqid > hlog.getSequenceNumber()) {
+      hlog.setSequenceNumber(seqid);
+    }
+    // If this is a meta region and meta region seqid recording is not enabled, skip it.
+    if (regionInfo.isMetaTable() && !HTableDescriptor.isMetaregionSeqidRecordEnabled(conf)) {
+      LOG.info("Recording of sequence id of meta region not enabled!");
+    } else {
+      // Update seqids. This transition records the last seqid and the next seqid on the
+      // RegionServer; it will be recorded in the HMaster meta table.
+      // Note @Jiqing: these lines are not atomic, which means edits can come in
+      // between the setSequenceNumber and getSequenceNumber calls.
+      // Example:
+      // If hlog.seqid = 100 and seqid = 200, then after hlog.setSequenceNumber(200),
+      // hlog.getSequenceNumber may return 210 if there are 10 edits inbound.
+      // The transition seqid is still valid.
+      HRegionSeqidTransition seqidTransition =
+        new HRegionSeqidTransition(seqid - 1, hlog.getSequenceNumber());
+      LOG.info("Sequence id of region " + regionInfo.getRegionNameAsString() +
+          " has a transition from " + seqidTransition.getLastSeqid() +
+          " to " + seqidTransition.getNextSeqid() +
+          ". Will be recorded in meta/root table under " +
+          Bytes.toString(HConstants.CATALOG_HISTORIAN_FAMILY)  + " family.");
+      // append to hlog
+      hlog.writeSeqidTransition(seqidTransition, serverInfo, regionInfo);
+      seqidTransitionList.add(seqidTransition);
     }
+
     return r;
   }
 

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Sat Apr 19 18:18:28 2014
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.ipc.HBaseServer.Call;
@@ -84,6 +85,7 @@ import org.apache.hadoop.hbase.ipc.Profi
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionSeqidTransition;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -93,6 +95,7 @@ import org.apache.hadoop.hbase.util.HasT
 import org.apache.hadoop.hbase.util.RuntimeExceptionAbortStrategy;
 import org.apache.hadoop.hbase.util.RuntimeHaltAbortStrategy;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.util.StringUtils;
 
@@ -2357,4 +2360,51 @@ public class HLog implements Syncable {
     logSyncerThread.syncerShuttingDown = true;
     logSyncerThread.interrupt();
   }
+
+  /**
+   * Write a seqid transition record to the hlog.
+   * @param seqidTransition the transition to record
+   * @param serverInfo the hosting server; may be null when called outside an HRS context
+   * @param regionInfo the region whose transition is recorded
+   * @return true when the operation succeeds
+   * @throws IOException
+   */
+  public boolean writeSeqidTransition(final HRegionSeqidTransition seqidTransition,
+      final HServerInfo serverInfo, final HRegionInfo regionInfo)
+    throws IOException {
+    if (regionInfo == null || seqidTransition == null) {
+      return false;
+    }
+    long now = EnvironmentEdgeManager.currentTimeMillis();
+    WALEdit walEdit = new WALEdit();
+    // serverInfo might be null as in HRegion$OpenHRegion
+    if (serverInfo != null) {
+      KeyValue kvServer = new KeyValue(regionInfo.getStartKey(), HLog.METAFAMILY,
+          HConstants.SERVER_QUALIFIER, now,
+          Bytes.toBytes(serverInfo.getHostnamePort()));
+      KeyValue kvStartcode = new KeyValue(regionInfo.getStartKey(), HLog.METAFAMILY,
+          HConstants.STARTCODE_QUALIFIER, now,
+          Bytes.toBytes(serverInfo.getStartCode()));
+      walEdit.add(kvServer);
+      walEdit.add(kvStartcode);
+    }
+    KeyValue kvLast = new KeyValue(regionInfo.getStartKey(), HLog.METAFAMILY,
+        HConstants.LAST_SEQID_QUALIFIER, now,
+        Bytes.toBytes(seqidTransition.getLastSeqid()));
+    KeyValue kvNext = new KeyValue(regionInfo.getStartKey(), HLog.METAFAMILY,
+        HConstants.NEXT_SEQID_QUALIFIER, now,
+        Bytes.toBytes(seqidTransition.getNextSeqid()));
+    KeyValue kvRegionInfo = new KeyValue(regionInfo.getStartKey(), HLog.METAFAMILY,
+        HConstants.REGIONINFO_QUALIFIER, now,
+        Writables.getBytes(regionInfo));
+    walEdit.add(kvLast);
+    walEdit.add(kvNext);
+    walEdit.add(kvRegionInfo);
+    long newSeqid = append(regionInfo, regionInfo.getTableDesc().getName(), walEdit, now);
+    LOG.info("Region " + regionInfo.getRegionNameAsString() +
+               " sequence id transition " +
+               seqidTransition.getLastSeqid() + "->" +
+               seqidTransition.getNextSeqid() + " appended to HLog at seqid=" + newSeqid);
+    return true;
+  }
 }

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java Sat Apr 19 18:18:28 2014
@@ -334,9 +334,11 @@ public class FSUtils {
    */
   public static boolean rootRegionExists(FileSystem fs, Path rootdir)
   throws IOException {
-    Path rootRegionDir =
+    Path rootRegionDir1 =
       HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO);
-    return fs.exists(rootRegionDir);
+    Path rootRegionDir2 =
+      HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN);
+    return fs.exists(rootRegionDir1) || fs.exists(rootRegionDir2);
   }
 
   /**

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java Sat Apr 19 18:18:28 2014
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.HBaseConf
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -266,8 +267,14 @@ public class MetaUtils {
     if (this.rootRegion != null) {
       return this.rootRegion;
     }
-    this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO,
-      this.rootdir, getLog(), this.conf);
+    if (HTableDescriptor.isMetaregionSeqidRecordEnabled(conf)) {
+      this.rootRegion = HRegion.openHRegion(
+          HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN,
+          this.rootdir, getLog(), this.conf);
+    } else {
+      this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO,
+          this.rootdir, getLog(), this.conf);
+    }
     this.rootRegion.compactStores();
     return this.rootRegion;
   }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java Sat Apr 19 18:18:28 2014
@@ -641,7 +641,11 @@ public abstract class HBaseTestCase exte
   }
 
   protected void createRootAndMetaRegions() throws IOException {
-    root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf);
+    if (HTableDescriptor.isMetaregionSeqidRecordEnabled(conf)) {
+      root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN, testDir, conf);
+    } else {
+      root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf);
+    }
     meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
         conf);
     HRegion.addRegionToMETA(root, meta);

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java Sat Apr 19 18:18:28 2014
@@ -469,6 +469,7 @@ public class MiniHBaseCluster {
    * of HRS carrying regionName. Returns -1 if none found.
    */
   public int getServerWithRoot() {
+    // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same getRegionName()
     return getServerWith(HRegionInfo.ROOT_REGIONINFO.getRegionName());
   }
 

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java Sat Apr 19 18:18:28 2014
@@ -461,7 +461,11 @@ public class TestAdmin {
   public void testCreateBadTables() throws IOException {
     String msg = null;
     try {
-      this.admin.createTable(HTableDescriptor.ROOT_TABLEDESC);
+      if (HTableDescriptor.isMetaregionSeqidRecordEnabled(TEST_UTIL.getConfiguration())) {
+        this.admin.createTable(HTableDescriptor.ROOT_TABLEDESC_WITH_HISTORIAN_COLUMN);
+      } else {
+        this.admin.createTable(HTableDescriptor.ROOT_TABLEDESC);
+      }
     } catch (IllegalArgumentException e) {
       msg = e.toString();
     }

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java Sat Apr 19 18:18:28 2014
@@ -51,7 +51,7 @@ public class TestChangingEncoding {
   private static final Log LOG = LogFactory.getLog(TestChangingEncoding.class);
 
   static final String CF = "EncodingTestCF";
-  static final byte[] CF_BYTES = Bytes.toBytes(CF);
+  public static final byte[] CF_BYTES = Bytes.toBytes(CF);
 
   private static final int NUM_ROWS_PER_BATCH = 100;
   private static final int NUM_COLS_PER_ROW = 20;
@@ -118,7 +118,7 @@ public class TestChangingEncoding {
         + "_col" + j);
   }
 
-  static void writeTestDataBatch(Configuration conf, String tableName,
+  public static void writeTestDataBatch(Configuration conf, String tableName,
       int batchId) throws Exception {
     LOG.debug("Writing test data batch " + batchId);
     HTable table = new HTable(conf, tableName);
@@ -133,7 +133,20 @@ public class TestChangingEncoding {
     table.close();
   }
 
-  static void verifyTestDataBatch(Configuration conf, String tableName,
+  public static void writeTestDataBatchToRegion(HRegion region, byte[] row,
+      int batchId) throws Exception {
+    LOG.debug("Writing test data batch " + batchId);
+    for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
+      Put put = new Put(row);
+      for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
+        put.add(CF_BYTES, getQualifier(j), getValue(batchId, i, j));
+      }
+      // issue a single put per row once all of its columns have been added
+      region.put(put);
+    }
+  }
+
+  public static void verifyTestDataBatch(Configuration conf, String tableName,
       int batchId) throws Exception {
     LOG.debug("Verifying test data batch " + batchId);
     HTable table = new HTable(conf, tableName);

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java Sat Apr 19 18:18:28 2014
@@ -211,7 +211,8 @@ public class TestDistributedLogSplitting
       int c = countHLog(files[0].getPath(), fs, conf);
       count += c;
       LOG.info(c + " edits in " + files[0].getPath());
-      assertEquals(NUM_LOG_LINES, count);
+      // one more hlog edit for recording the seqid transition
+      assertEquals(NUM_LOG_LINES + 1, count);
     }
   }
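
The extra edit counted above is the METAFAMILY bookkeeping entry a region now writes when it reopens, carrying the seqid transition. A test that wants to count only user edits can filter on HLog.METAFAMILY instead of adjusting the expected total, as TestMutationWriteToWAL does at the end of this revision; a minimal sketch using the reader API already exercised in this patch (the helper name and placement are illustrative):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: count user edits in a single HLog file, skipping
    // METAFAMILY bookkeeping entries such as the seqid-transition record.
    static int countUserEdits(FileSystem fs, Path logPath, Configuration conf)
        throws IOException {
      int count = 0;
      HLog.Reader reader = HLog.getReader(fs, logPath, conf);
      try {
        HLog.Entry entry = null;
        while ((entry = reader.next(entry)) != null) {
          for (KeyValue kv : entry.getEdit().getKeyValues()) {
            if (!Bytes.equals(kv.getFamily(), HLog.METAFAMILY)) {
              ++count;
            }
          }
        }
      } finally {
        reader.close();
      }
      return count;
    }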
 

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java Sat Apr 19 18:18:28 2014
@@ -73,6 +73,7 @@ public class TestRestartCluster {
     } catch (IOException e) {
       LOG.error("Error creating event data for " + hbEventType, e);
     }
+    // ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN has the same regionName
     zkWrapper.createOrUpdateUnassignedRegion(
         HRegionInfo.ROOT_REGIONINFO.getEncodedName(), data);
     zkWrapper.createOrUpdateUnassignedRegion(

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java Sat Apr 19 18:18:28 2014
@@ -146,6 +146,19 @@ public class TestHRegionInfo {
     assertEquals(hregionInfo, hregionInfoCopy);
     assertEquals(hregionInfo.hashCode(), hregionInfoCopy.hashCode());
 
+    // test a root region info with historian column
+    hregionInfo = HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN;
+    assertTrue(hregionInfo.getTableDesc().isMetaRegion());
+    codec.write(hregionInfo, protocol);
+    hregionInfoCopy = codec.read(protocol);
+    assertEquals(hregionInfo, hregionInfoCopy);
+    assertEquals(hregionInfo.hashCode(), hregionInfoCopy.hashCode());
+
+    assertTrue(Bytes.equals(HRegionInfo.ROOT_REGIONINFO.getRegionName(),
+                 HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN.getRegionName()));
+    assertEquals(HRegionInfo.ROOT_REGIONINFO.getEncodedName(),
+                 HRegionInfo.ROOT_REGIONINFO_WITH_HISTORIAN_COLUMN.getEncodedName());
+
     // test a meta region info
     hregionInfo = HRegionInfo.FIRST_META_REGIONINFO;
     assertTrue(hregionInfo.getTableDesc().isMetaRegion());

Added: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionSeqidTransition.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionSeqidTransition.java?rev=1588690&view=auto
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionSeqidTransition.java (added)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionSeqidTransition.java Sat Apr 19 18:18:28 2014
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2014 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.encoding.TestChangingEncoding;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.Collection;
+
+public class TestHRegionSeqidTransition {
+  private static final Log LOG = LogFactory.getLog(TestHRegionSeqidTransition.class);
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final Configuration c = TEST_UTIL.getConfiguration();
+  private static final String TABLENAME = "testHRegionSeqidTransition";
+  private static final byte [][] FAMILIES = new byte [][] {TestChangingEncoding.CF_BYTES};
+  private static final int NUM_HFILE_BATCHES = 2;
+
+
+  /**
+   * Test the serialization and deserialization of HRegionSeqidTransition.
+   * @throws Exception
+   */
+  @Test
+  public void testSerDe() throws Exception {
+    HRegionSeqidTransition idTranOrigin = new HRegionSeqidTransition(10, 100);
+    byte[] buffer = HRegionSeqidTransition.toBytes(idTranOrigin);
+    HRegionSeqidTransition idTranCopy = HRegionSeqidTransition.fromBytes(buffer);
+    assertEquals(idTranOrigin, idTranCopy);
+  }
+
+  @BeforeClass
+  public static void beforeAllTests() throws Exception {
+    c.setBoolean("dfs.support.append", true);
+    c.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
+    c.setInt("hbase.master.meta.thread.rescanfrequency", 5*1000);
+    // enable the meta region seqid recording
+    c.setBoolean(HTableDescriptor.METAREGION_SEQID_RECORD_ENABLED, true);
+    TEST_UTIL.startMiniCluster(3);
+    TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
+    HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+    int countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
+    TEST_UTIL.waitUntilAllRegionsAssigned(countOfRegions);
+  }
+
+  @AfterClass
+  public static void afterAllTests() throws IOException {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setup() throws IOException {
+    if (TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size() < 3) {
+      // Need at least three servers.
+      LOG.info("Started new server=" +
+                 TEST_UTIL.getHBaseCluster().startRegionServer());
+    }
+  }
+
+  @Test(timeout=300000)
+  public void testSeqidTransitionOnRegionShutdown()
+    throws Exception {
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+    LOG.info("Number of region servers = " + cluster.getLiveRegionServerThreads().size());
+
+    HRegionServer regionServer0 = cluster.getRegionServer(0);
+    HRegionServer regionServer1 = cluster.getRegionServer(1);
+    HRegionServer regionServer2 = cluster.getRegionServer(2);
+
+    Collection<HRegion> regions = regionServer1.getOnlineRegions();
+    LOG.debug("RS " + regionServer1.getServerInfo().getHostnamePort() + " has "
+                + regions.size() + " online regions");
+    HRegion region = null;
+    for (HRegion oneRegion : regions) {
+      if (!oneRegion.getRegionInfo().isMetaRegion()) {
+        region = oneRegion;
+        break;
+      }
+    }
+    assertTrue(region != null);
+    // write KVs
+    for (int i = 0; i < NUM_HFILE_BATCHES; ++i) {
+      TestChangingEncoding.writeTestDataBatchToRegion(region, region.getStartKey(), i);
+    }
+    // normal shutdown
+    LOG.debug("Stopping RS " + regionServer1.getServerInfo().getHostnamePort());
+    cluster.stopRegionServer(1);
+
+    regions = regionServer2.getOnlineRegions();
+    LOG.debug("RS " + regionServer2.getServerInfo().getHostnamePort() + " has "
+                + regions.size() + " online regions");
+    HRegion region1 = null;
+    for (HRegion oneRegion : regions) {
+      if (!oneRegion.getRegionInfo().isMetaRegion()) {
+        region1 = oneRegion;
+        break;
+      }
+    }
+    assertTrue(region1 != null);
+    // write KVs
+    for (int i = 0; i < NUM_HFILE_BATCHES; ++i) {
+      TestChangingEncoding.writeTestDataBatchToRegion(region1, region1.getStartKey(), i);
+    }
+    // abnormal shutdown
+    LOG.debug("Aborting RS " + regionServer2.getServerInfo().getHostnamePort());
+    cluster.abortRegionServer(2);
+
+
+    regions = regionServer0.getOnlineRegions();
+    LOG.debug("RS " + regionServer0.getServerInfo().getHostnamePort() + " has "
+                + regions.size() + " online regions");
+    HRegion region0 = null;
+    for (HRegion oneRegion : regions) {
+      if (!oneRegion.getRegionInfo().isMetaRegion()) {
+        region0 = oneRegion;
+        break;
+      }
+    }
+    assertTrue(region0 != null);
+    // write KVs
+    for (int i = 0; i < NUM_HFILE_BATCHES; ++i) {
+      TestChangingEncoding.writeTestDataBatchToRegion(region0, region0.getStartKey(), i);
+    }
+    // wait until the two reassigned regions come online on regionServer0
+    while (regionServer0.getOnlineRegion(region.getRegionName()) == null) {
+      Thread.sleep(500);
+    }
+    LOG.debug("Region " + region.getRegionNameAsString() + " opened on RS "
+                + regionServer0.getServerInfo().getHostnamePort());
+    while (regionServer0.getOnlineRegion(region1.getRegionName()) == null) {
+      Thread.sleep(500);
+    }
+    LOG.debug("Region " + region1.getRegionNameAsString() + " opened on RS "
+                + regionServer0.getServerInfo().getHostnamePort());
+
+    // start testing
+    HRegion[] testRegions = {region, region1};
+    int[] regionHLogNums = {0, 0};
+
+    // check hlog
+    FileSystem fs = FileSystem.get(c);
+    final Path baseDir = new Path(c.get(HConstants.HBASE_DIR));
+    final Path logDir = new Path(baseDir, HConstants.HREGION_LOGDIR_NAME);
+    int nLogFilesRead = 0;
+    ArrayDeque<FileStatus> checkQueue = new ArrayDeque<FileStatus>(
+      Arrays.asList(fs.listStatus(logDir)));
+    while (!checkQueue.isEmpty()) {
+      FileStatus logFile = checkQueue.pop();
+      if (logFile.isDir()) {
+        checkQueue.addAll(Arrays.asList(fs.listStatus(logFile.getPath())));
+        continue;
+      }
+      HLog.Reader r = HLog.getReader(fs, logFile.getPath(), c);
+      LOG.info("Reading HLog: " + logFile.getPath());
+      HLog.Entry entry = null;
+      while ((entry = r.next(entry)) != null) {
+        HLogKey key = entry.getKey();
+        WALEdit edit = entry.getEdit();
+        for (int regionIndex = 0; regionIndex < testRegions.length; ++regionIndex) {
+          if (Bytes.equals(key.getRegionName(), testRegions[regionIndex].getRegionName()) &&
+            Bytes.equals(key.getTablename(),
+                         testRegions[regionIndex].getTableDesc().getName())) {
+            int count = 0;
+            long lastSeqid = -1, nextSeqid = -1;
+            for (KeyValue kv : edit.getKeyValues()) {
+              if (Bytes.equals(kv.getRow(), testRegions[regionIndex].getStartKey()) &&
+                Bytes.equals(kv.getFamily(), HLog.METAFAMILY)) {
+                byte[] qualifier = kv.getQualifier();
+                if (Bytes.equals(qualifier, HConstants.LAST_SEQID_QUALIFIER)) {
+                  lastSeqid = Bytes.toLong(kv.getValue());
+                  ++count;
+                  continue;
+                }
+                if (Bytes.equals(qualifier, HConstants.NEXT_SEQID_QUALIFIER)) {
+                  nextSeqid = Bytes.toLong(kv.getValue());
+                  ++count;
+                  continue;
+                }
+                if (Bytes.equals(qualifier, HConstants.SERVER_QUALIFIER) ||
+                    Bytes.equals(qualifier, HConstants.STARTCODE_QUALIFIER) ||
+                    Bytes.equals(qualifier, HConstants.REGIONINFO_QUALIFIER)) {
+                  ++count;
+                  continue;
+                }
+              } else {
+                break;
+              }
+            }
+            if ((count == 5) || (count == 3)) { // 3 or 5 matching qualifiers mark a seqid-transition record
+              ++regionHLogNums[regionIndex];
+              HRegionSeqidTransition transition = new HRegionSeqidTransition(lastSeqid, nextSeqid);
+              LOG.info("Found hlog records for region " +
+                  testRegions[regionIndex].getRegionNameAsString() + ": " + transition);
+            }
+          }
+        }
+      }
+      r.close();
+      ++nLogFilesRead;
+    }
+    LOG.info("Processed " + nLogFilesRead +" log files and found " +
+               regionHLogNums[0] + " hlogs for" + testRegions[0].getRegionNameAsString() + ", " +
+               regionHLogNums[1] + " hlogs for" + testRegions[1].getRegionNameAsString());
+    assertTrue(nLogFilesRead > 0);
+    assertTrue(regionHLogNums[0] > 0);
+    assertTrue(regionHLogNums[1] > 0);
+
+    // check meta table
+    HTable meta = new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
+    Scan scan = new Scan();
+    scan.addColumn(HConstants.CATALOG_HISTORIAN_FAMILY, HConstants.SERVER_QUALIFIER);
+    scan.addColumn(HConstants.CATALOG_HISTORIAN_FAMILY, HConstants.STARTCODE_QUALIFIER);
+    scan.addColumn(HConstants.CATALOG_HISTORIAN_FAMILY, HConstants.LAST_SEQID_QUALIFIER);
+    scan.addColumn(HConstants.CATALOG_HISTORIAN_FAMILY, HConstants.NEXT_SEQID_QUALIFIER);
+    scan.addColumn(HConstants.CATALOG_HISTORIAN_FAMILY, HConstants.REGIONINFO_QUALIFIER);
+    ResultScanner s = meta.getScanner(scan);
+    int[] rows = {0, 0};
+    for (Result result; (result = s.next()) != null;) {
+      for (int regionIndex = 0; regionIndex < testRegions.length; ++regionIndex) {
+        if (Bytes.equals(result.getRow(), testRegions[regionIndex].getRegionName())) {
+          byte[] server = result.getValue(HConstants.CATALOG_HISTORIAN_FAMILY,
+                                          HConstants.SERVER_QUALIFIER);
+          byte[] startcode = result.getValue(HConstants.CATALOG_HISTORIAN_FAMILY,
+                                             HConstants.STARTCODE_QUALIFIER);
+          byte[] lastSeqid = result.getValue(HConstants.CATALOG_HISTORIAN_FAMILY,
+                                             HConstants.LAST_SEQID_QUALIFIER);
+          byte[] nextSeqid = result.getValue(HConstants.CATALOG_HISTORIAN_FAMILY,
+                                             HConstants.NEXT_SEQID_QUALIFIER);
+          byte[] regionInfo = result.getValue(HConstants.CATALOG_HISTORIAN_FAMILY,
+                                              HConstants.REGIONINFO_QUALIFIER);
+          if (server != null && startcode != null &&
+              lastSeqid != null && nextSeqid != null &&
+              regionInfo != null) {
+            ++rows[regionIndex];
+            HRegionSeqidTransition transition = new HRegionSeqidTransition(
+              Bytes.toLong(lastSeqid), Bytes.toLong(nextSeqid));
+            LOG.info("Found meta records for regions[" + regionIndex +
+                "] with a sequence id transition: " + transition);
+          }
+        }
+      }
+    }
+
+    LOG.info("Found " + rows[0] + " meta records for " + testRegions[0].getRegionNameAsString() +
+               "and " + rows[1] + " meta records for " + testRegions[1].getRegionNameAsString());
+    assertTrue(rows[0] > 0);
+    assertTrue(rows[1] > 0);
+  }
+
+  private static byte [] getTestFamily() {
+    return FAMILIES[0];
+  }
+}
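
TestHRegionSeqidTransition above touches HRegionSeqidTransition only through its constructor, toBytes/fromBytes, equals, and toString; the class itself is added under regionserver/ in this revision and is not reproduced in this mail. A minimal sketch of that surface, assuming two long fields serialized back to back (the field layout and method bodies are assumptions, not the committed implementation):

    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch of the API surface used by TestHRegionSeqidTransition.
    // Field layout is assumed, not taken from the committed class.
    public final class HRegionSeqidTransition {
      private final long lastSeqid;  // last seqid before the region closed
      private final long nextSeqid;  // first seqid after the region reopened

      public HRegionSeqidTransition(long lastSeqid, long nextSeqid) {
        this.lastSeqid = lastSeqid;
        this.nextSeqid = nextSeqid;
      }

      public static byte[] toBytes(HRegionSeqidTransition t) {
        byte[] b = new byte[2 * Bytes.SIZEOF_LONG];
        Bytes.putLong(b, 0, t.lastSeqid);
        Bytes.putLong(b, Bytes.SIZEOF_LONG, t.nextSeqid);
        return b;
      }

      public static HRegionSeqidTransition fromBytes(byte[] b) {
        return new HRegionSeqidTransition(
            Bytes.toLong(b, 0), Bytes.toLong(b, Bytes.SIZEOF_LONG));
      }

      @Override
      public boolean equals(Object o) {
        if (!(o instanceof HRegionSeqidTransition)) {
          return false;
        }
        HRegionSeqidTransition other = (HRegionSeqidTransition) o;
        return lastSeqid == other.lastSeqid && nextSeqid == other.nextSeqid;
      }

      @Override
      public int hashCode() {
        return (int) (31 * lastSeqid + nextSeqid);
      }

      @Override
      public String toString() {
        return "last seqid=" + lastSeqid + ", next seqid=" + nextSeqid;
      }
    }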

Modified: hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java?rev=1588690&r1=1588689&r2=1588690&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java (original)
+++ hbase/branches/0.89-fb/src/test/java/org/apache/hadoop/hbase/thrift/TestMutationWriteToWAL.java Sat Apr 19 18:18:28 2014
@@ -168,7 +168,7 @@ public class TestMutationWriteToWAL exte
           continue;
         }
         for (KeyValue kv : entry.getEdit().getKeyValues()) {
-          if (Bytes.equals(kv.getRow(), HLog.METAROW)) {
+          if (Bytes.equals(kv.getRow(), HLog.METAROW) || Bytes.equals(kv.getFamily(), HLog.METAFAMILY)) {
             continue;
           }
           actualLogEntries.add(Bytes.toStringBinary(kv.getRow()) + "," +