Posted to commits@hbase.apache.org by en...@apache.org on 2014/08/20 03:45:53 UTC

[3/3] git commit: HBASE-11512 Write region open/close events to WAL

HBASE-11512 Write region open/close events to WAL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d44e7df5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d44e7df5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d44e7df5

Branch: refs/heads/master
Commit: d44e7df5dc1e701fae4b611b9858dfd80499ee35
Parents: aeecd20
Author: Enis Soztutar <en...@apache.org>
Authored: Tue Aug 19 18:45:21 2014 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Tue Aug 19 18:45:21 2014 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |   26 +
 .../hbase/protobuf/generated/WALProtos.java     | 2455 +++++++++++++++++-
 hbase-protocol/src/main/protobuf/WAL.proto      |   23 +
 .../hadoop/hbase/regionserver/HRegion.java      |   52 +
 .../hadoop/hbase/regionserver/wal/HLogUtil.java |   17 +
 .../hadoop/hbase/regionserver/wal/WALEdit.java  |   16 +
 .../master/TestDistributedLogSplitting.java     |   38 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  136 +
 .../hbase/regionserver/wal/TestWALReplay.java   |   21 +-
 9 files changed, 2755 insertions(+), 29 deletions(-)
----------------------------------------------------------------------
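
Before the full diff, a minimal usage sketch of the new ProtobufUtil#toRegionEventDescriptor helper that this patch adds. This is an illustration, not code from the commit: the table name, server name, sequence id, and store file paths are hypothetical placeholders, and the REGION_OPEN event type is assumed from the RegionEventDescriptor message this change adds to WAL.proto.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionEventDescriptorSketch {
  public static void main(String[] args) {
    // Hypothetical region and server; any HRegionInfo/ServerName would do.
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("t1"));
    ServerName server = ServerName.valueOf("rs1.example.org,60020,1408500000000");

    // One map entry per store: column family name -> store files in that store.
    // byte[] keys need Bytes.BYTES_COMPARATOR so the map compares by content.
    Map<byte[], List<Path>> storeFiles =
        new TreeMap<byte[], List<Path>>(Bytes.BYTES_COMPARATOR);
    List<Path> files = new ArrayList<Path>();
    files.add(new Path("/hbase/data/default/t1/region1/f/storefile1"));
    storeFiles.put(Bytes.toBytes("f"), files);

    // REGION_OPEN is assumed from the WAL.proto hunk of this patch;
    // 42L stands in for the region's open sequence id.
    RegionEventDescriptor desc = ProtobufUtil.toRegionEventDescriptor(
        RegionEventDescriptor.EventType.REGION_OPEN, hri, 42L, server, storeFiles);
    System.out.println(desc);
  }
}

Note that the helper records only the file name of each store file (path.getName()) plus the family name as the store home dir, so consumers of the marker would presumably resolve the files relative to the region and store directories.
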


http://git-wip-us.apache.org/repos/asf/hbase/blob/d44e7df5/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 4033fb5..86fe515 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -120,6 +120,8 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.security.access.UserPermission;
@@ -2529,6 +2531,30 @@ public final class ProtobufUtil {
     return desc.build();
   }
 
+  public static RegionEventDescriptor toRegionEventDescriptor(
+      EventType eventType, HRegionInfo hri, long seqId, ServerName server,
+      Map<byte[], List<Path>> storeFiles) {
+    RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder()
+        .setEventType(eventType)
+        .setTableName(ByteStringer.wrap(hri.getTable().getName()))
+        .setEncodedRegionName(ByteStringer.wrap(hri.getEncodedNameAsBytes()))
+        .setLogSequenceNumber(seqId)
+        .setServer(toServerName(server));
+
+    for (Map.Entry<byte[], List<Path>> entry : storeFiles.entrySet()) {
+      RegionEventDescriptor.StoreDescriptor.Builder builder
+        = RegionEventDescriptor.StoreDescriptor.newBuilder()
+          .setFamilyName(ByteStringer.wrap(entry.getKey()))
+          .setStoreHomeDir(Bytes.toString(entry.getKey()));
+      for (Path path : entry.getValue()) {
+        builder.addStoreFile(path.getName());
+      }
+
+      desc.addStores(builder);
+    }
+    return desc.build();
+  }
+
   /**
    * Return short version of Message toString'd, shorter than TextFormat#shortDebugString.
    * Tries to NOT print out data both because it can be big but also so we do not have data in our